-rw-r--r-- .mailmap | 1
-rw-r--r-- Documentation/core-api/xarray.rst | 14
-rw-r--r-- Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/psci.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/cpu/idle-states.yaml (renamed from Documentation/devicetree/bindings/arm/idle-states.yaml) | 228
-rw-r--r-- Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml | 77
-rw-r--r-- Documentation/devicetree/bindings/input/mtk-pmic-keys.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml | 74
-rw-r--r-- Documentation/devicetree/bindings/riscv/cpus.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml | 84
-rw-r--r-- Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml | 69
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.yaml | 2
-rw-r--r-- Documentation/filesystems/cifs/ksmbd.rst | 4
-rw-r--r-- Documentation/filesystems/fsverity.rst | 6
-rw-r--r-- Documentation/filesystems/locking.rst | 6
-rw-r--r-- Documentation/filesystems/vfs.rst | 11
-rw-r--r-- Documentation/riscv/index.rst | 1
-rw-r--r-- Documentation/virt/kvm/api.rst | 61
-rw-r--r-- Documentation/virt/kvm/index.rst | 26
-rw-r--r-- Documentation/virt/kvm/locking.rst | 43
-rw-r--r-- Documentation/virt/kvm/s390/index.rst | 12
-rw-r--r-- Documentation/virt/kvm/s390/s390-diag.rst (renamed from Documentation/virt/kvm/s390-diag.rst) | 0
-rw-r--r-- Documentation/virt/kvm/s390/s390-pv-boot.rst (renamed from Documentation/virt/kvm/s390-pv-boot.rst) | 0
-rw-r--r-- Documentation/virt/kvm/s390/s390-pv.rst (renamed from Documentation/virt/kvm/s390-pv.rst) | 0
-rw-r--r-- Documentation/virt/kvm/vcpu-requests.rst | 10
-rw-r--r-- Documentation/virt/kvm/x86/amd-memory-encryption.rst (renamed from Documentation/virt/kvm/amd-memory-encryption.rst) | 0
-rw-r--r-- Documentation/virt/kvm/x86/cpuid.rst (renamed from Documentation/virt/kvm/cpuid.rst) | 0
-rw-r--r-- Documentation/virt/kvm/x86/errata.rst | 39
-rw-r--r-- Documentation/virt/kvm/x86/halt-polling.rst (renamed from Documentation/virt/kvm/halt-polling.rst) | 0
-rw-r--r-- Documentation/virt/kvm/x86/hypercalls.rst (renamed from Documentation/virt/kvm/hypercalls.rst) | 0
-rw-r--r-- Documentation/virt/kvm/x86/index.rst | 19
-rw-r--r-- Documentation/virt/kvm/x86/mmu.rst (renamed from Documentation/virt/kvm/mmu.rst) | 0
-rw-r--r-- Documentation/virt/kvm/x86/msr.rst (renamed from Documentation/virt/kvm/msr.rst) | 0
-rw-r--r-- Documentation/virt/kvm/x86/nested-vmx.rst (renamed from Documentation/virt/kvm/nested-vmx.rst) | 0
-rw-r--r-- Documentation/virt/kvm/x86/running-nested-guests.rst (renamed from Documentation/virt/kvm/running-nested-guests.rst) | 0
-rw-r--r-- Documentation/virt/kvm/x86/timekeeping.rst (renamed from Documentation/virt/kvm/timekeeping.rst) | 0
-rw-r--r-- Documentation/vm/page_owner.rst | 1
-rw-r--r-- Documentation/vm/unevictable-lru.rst | 471
-rw-r--r-- MAINTAINERS | 34
-rw-r--r-- arch/alpha/include/asm/user.h | 6
-rw-r--r-- arch/arm/boot/dts/spear1340.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/spear13xx.dtsi | 6
-rw-r--r-- arch/arm/include/asm/user.h | 4
-rw-r--r-- arch/arm/mach-omap2/omap-secure.c | 13
-rw-r--r-- arch/arm64/boot/dts/amd/Makefile | 4
-rw-r--r-- arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts | 13
-rw-r--r-- arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts | 1
-rw-r--r-- arch/arm64/boot/dts/amd/amd-overdrive.dts | 66
-rw-r--r-- arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi | 224
-rw-r--r-- arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi | 70
-rw-r--r-- arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi | 22
-rw-r--r-- arch/arm64/boot/dts/amd/husky.dts | 84
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 6
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 6
-rw-r--r-- arch/h8300/include/asm/user.h | 4
-rw-r--r-- arch/ia64/include/asm/user.h | 6
-rw-r--r-- arch/m68k/include/asm/user.h | 4
-rw-r--r-- arch/mips/crypto/crc32-mips.c | 46
-rw-r--r-- arch/mips/include/asm/mach-rc32434/rb.h | 9
-rw-r--r-- arch/mips/lantiq/falcon/sysctrl.c | 2
-rw-r--r-- arch/mips/lantiq/xway/gptu.c | 2
-rw-r--r-- arch/mips/lantiq/xway/sysctrl.c | 46
-rw-r--r-- arch/mips/rb532/gpio.c | 10
-rw-r--r-- arch/mips/sgi-ip22/ip22-gio.c | 2
-rw-r--r-- arch/powerpc/include/asm/user.h | 5
-rw-r--r-- arch/riscv/Kconfig | 8
-rw-r--r-- arch/riscv/Kconfig.socs | 3
-rw-r--r-- arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts | 2
-rw-r--r-- arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts | 2
-rw-r--r-- arch/riscv/boot/dts/canaan/sipeed_maix_go.dts | 2
-rw-r--r-- arch/riscv/boot/dts/canaan/sipeed_maixduino.dts | 2
-rw-r--r-- arch/riscv/configs/defconfig | 5
-rw-r--r-- arch/riscv/configs/nommu_k210_defconfig | 1
-rw-r--r-- arch/riscv/configs/nommu_k210_sdcard_defconfig | 1
-rw-r--r-- arch/riscv/configs/nommu_virt_defconfig | 1
-rw-r--r-- arch/riscv/configs/rv32_defconfig | 5
-rw-r--r-- arch/riscv/include/asm/asm.h | 26
-rw-r--r-- arch/riscv/include/asm/cpuidle.h | 24
-rw-r--r-- arch/riscv/include/asm/current.h | 2
-rw-r--r-- arch/riscv/include/asm/module.lds.h | 6
-rw-r--r-- arch/riscv/include/asm/suspend.h | 36
-rw-r--r-- arch/riscv/include/asm/thread_info.h | 10
-rw-r--r-- arch/riscv/kernel/Makefile | 2
-rw-r--r-- arch/riscv/kernel/asm-offsets.c | 3
-rw-r--r-- arch/riscv/kernel/cpu.c | 6
-rw-r--r-- arch/riscv/kernel/cpu_ops_sbi.c | 2
-rw-r--r-- arch/riscv/kernel/head.S | 27
-rw-r--r-- arch/riscv/kernel/module.c | 4
-rw-r--r-- arch/riscv/kernel/perf_callchain.c | 2
-rw-r--r-- arch/riscv/kernel/process.c | 3
-rw-r--r-- arch/riscv/kernel/stacktrace.c | 6
-rw-r--r-- arch/riscv/kernel/suspend.c | 87
-rw-r--r-- arch/riscv/kernel/suspend_entry.S | 124
-rw-r--r-- arch/s390/Kconfig | 1
-rw-r--r-- arch/s390/include/asm/alternative-asm.h | 12
-rw-r--r-- arch/s390/include/asm/alternative.h | 15
-rw-r--r-- arch/s390/include/asm/ap.h | 60
-rw-r--r-- arch/s390/include/asm/ctl_reg.h | 16
-rw-r--r-- arch/s390/include/asm/processor.h | 17
-rw-r--r-- arch/s390/include/asm/spinlock.h | 2
-rw-r--r-- arch/s390/include/asm/syscall_wrapper.h | 2
-rw-r--r-- arch/s390/include/asm/unwind.h | 13
-rw-r--r-- arch/s390/include/asm/user.h | 4
-rw-r--r-- arch/s390/kernel/entry.S | 10
-rw-r--r-- arch/s390/kernel/ipl.c | 4
-rw-r--r-- arch/s390/kernel/kprobes.c | 8
-rw-r--r-- arch/s390/kernel/machine_kexec.c | 2
-rw-r--r-- arch/s390/kernel/os_info.c | 2
-rw-r--r-- arch/s390/kernel/setup.c | 19
-rw-r--r-- arch/s390/kernel/smp.c | 57
-rw-r--r-- arch/s390/kernel/traps.c | 6
-rw-r--r-- arch/s390/kernel/unwind_bc.c | 12
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 2
-rw-r--r-- arch/s390/lib/spinlock.c | 4
-rw-r--r-- arch/s390/lib/test_unwind.c | 58
-rw-r--r-- arch/s390/pci/pci.c | 5
-rw-r--r-- arch/s390/pci/pci_bus.h | 7
-rw-r--r-- arch/s390/pci/pci_clp.c | 9
-rw-r--r-- arch/s390/pci/pci_event.c | 7
-rw-r--r-- arch/sh/include/asm/user.h | 6
-rw-r--r-- arch/um/include/asm/Kbuild | 1
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 46
-rw-r--r-- arch/x86/include/asm/svm.h | 14
-rw-r--r-- arch/x86/include/asm/user_32.h | 4
-rw-r--r-- arch/x86/include/asm/user_64.h | 4
-rw-r--r-- arch/x86/kernel/kvm.c | 2
-rw-r--r-- arch/x86/kvm/cpuid.c | 1
-rw-r--r-- arch/x86/kvm/emulate.c | 8
-rw-r--r-- arch/x86/kvm/hyperv.c | 22
-rw-r--r-- arch/x86/kvm/i8254.c | 6
-rw-r--r-- arch/x86/kvm/kvm_emulate.h | 3
-rw-r--r-- arch/x86/kvm/lapic.c | 4
-rw-r--r-- arch/x86/kvm/mmu.h | 32
-rw-r--r-- arch/x86/kvm/mmu/mmu.c | 27
-rw-r--r-- arch/x86/kvm/mmu/paging_tmpl.h | 82
-rw-r--r-- arch/x86/kvm/mmu/tdp_mmu.c | 72
-rw-r--r-- arch/x86/kvm/mmu/tdp_mmu.h | 12
-rw-r--r-- arch/x86/kvm/pmu.c | 18
-rw-r--r-- arch/x86/kvm/svm/avic.c | 14
-rw-r--r-- arch/x86/kvm/svm/pmu.c | 9
-rw-r--r-- arch/x86/kvm/svm/svm.c | 36
-rw-r--r-- arch/x86/kvm/svm/svm.h | 15
-rw-r--r-- arch/x86/kvm/svm/svm_onhyperv.c | 1
-rw-r--r-- arch/x86/kvm/trace.h | 22
-rw-r--r-- arch/x86/kvm/vmx/pmu_intel.c | 14
-rw-r--r-- arch/x86/kvm/vmx/vmx.c | 26
-rw-r--r-- arch/x86/kvm/x86.c | 161
-rw-r--r-- arch/x86/kvm/xen.c | 7
-rw-r--r-- arch/x86/lib/csum-partial_64.c | 26
-rw-r--r-- arch/x86/um/Kconfig | 1
-rw-r--r-- block/blk-cgroup.c | 32
-rw-r--r-- block/blk-ioc.c | 3
-rw-r--r-- block/blk-mq.c | 25
-rw-r--r-- block/blk-wbt.h | 3
-rw-r--r-- block/genhd.c | 2
-rw-r--r-- drivers/block/drbd/drbd_main.c | 7
-rw-r--r-- drivers/block/drbd/drbd_req.c | 45
-rw-r--r-- drivers/block/loop.c | 1
-rw-r--r-- drivers/block/n64cart.c | 2
-rw-r--r-- drivers/block/xen-blkback/blkback.c | 2
-rw-r--r-- drivers/block/xen-blkfront.c | 2
-rw-r--r-- drivers/clk/sunxi-ng/Kconfig | 5
-rw-r--r-- drivers/clk/sunxi-ng/Makefile | 2
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun6i-rtc.c | 395
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun6i-rtc.h | 15
-rw-r--r-- drivers/clk/sunxi-ng/ccu_common.h | 1
-rw-r--r-- drivers/clk/sunxi-ng/ccu_mux.c | 7
-rw-r--r-- drivers/cpuidle/Kconfig | 9
-rw-r--r-- drivers/cpuidle/Kconfig.arm | 1
-rw-r--r-- drivers/cpuidle/Kconfig.riscv | 15
-rw-r--r-- drivers/cpuidle/Makefile | 5
-rw-r--r-- drivers/cpuidle/cpuidle-psci-domain.c | 138
-rw-r--r-- drivers/cpuidle/cpuidle-psci.h | 15
-rw-r--r-- drivers/cpuidle/cpuidle-riscv-sbi.c | 627
-rw-r--r-- drivers/cpuidle/dt_idle_genpd.c | 178
-rw-r--r-- drivers/cpuidle/dt_idle_genpd.h | 50
-rw-r--r-- drivers/gpio/gpio-ts4900.c | 12
-rw-r--r-- drivers/gpio/gpio-ts5500.c | 4
-rw-r--r-- drivers/hid/Kconfig | 11
-rw-r--r-- drivers/hid/Makefile | 1
-rw-r--r-- drivers/hid/hid-google-hammer.c | 51
-rw-r--r-- drivers/hid/hid-vivaldi-common.c | 140
-rw-r--r-- drivers/hid/hid-vivaldi-common.h | 16
-rw-r--r-- drivers/hid/hid-vivaldi.c | 121
-rw-r--r-- drivers/input/Kconfig | 7
-rw-r--r-- drivers/input/Makefile | 1
-rw-r--r-- drivers/input/input.c | 70
-rw-r--r-- drivers/input/joystick/adi.c | 4
-rw-r--r-- drivers/input/joystick/xpad.c | 2
-rw-r--r-- drivers/input/keyboard/Kconfig | 14
-rw-r--r-- drivers/input/keyboard/Makefile | 1
-rw-r--r-- drivers/input/keyboard/atkbd.c | 27
-rw-r--r-- drivers/input/keyboard/cros_ec_keyb.c | 43
-rw-r--r-- drivers/input/keyboard/mt6779-keypad.c | 221
-rw-r--r-- drivers/input/keyboard/mtk-pmic-keys.c | 55
-rw-r--r-- drivers/input/misc/da9063_onkey.c | 13
-rw-r--r-- drivers/input/mouse/synaptics.c | 1
-rw-r--r-- drivers/input/serio/ps2-gpio.c | 195
-rw-r--r-- drivers/input/touchscreen/Kconfig | 10
-rw-r--r-- drivers/input/touchscreen/Makefile | 1
-rw-r--r-- drivers/input/touchscreen/goodix.c | 61
-rw-r--r-- drivers/input/touchscreen/goodix.h | 1
-rw-r--r-- drivers/input/touchscreen/imagis.c | 367
-rw-r--r-- drivers/input/touchscreen/iqs5xx.c | 16
-rw-r--r-- drivers/input/touchscreen/stmfts.c | 8
-rw-r--r-- drivers/input/touchscreen/tsc200x-core.c | 7
-rw-r--r-- drivers/input/vivaldi-fmap.c | 39
-rw-r--r-- drivers/md/dm-core.h | 4
-rw-r--r-- drivers/md/dm-integrity.c | 6
-rw-r--r-- drivers/md/dm-ioctl.c | 15
-rw-r--r-- drivers/md/dm.c | 20
-rw-r--r-- drivers/nvme/host/core.c | 38
-rw-r--r-- drivers/nvme/host/multipath.c | 32
-rw-r--r-- drivers/nvme/host/nvme.h | 23
-rw-r--r-- drivers/nvme/host/pci.c | 7
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 2
-rw-r--r-- drivers/nvme/target/configfs.c | 2
-rw-r--r-- drivers/nvme/target/core.c | 26
-rw-r--r-- drivers/nvme/target/fc.c | 8
-rw-r--r-- drivers/nvme/target/fcloop.c | 16
-rw-r--r-- drivers/nvme/target/io-cmd-file.c | 6
-rw-r--r-- drivers/nvme/target/loop.c | 4
-rw-r--r-- drivers/nvme/target/nvmet.h | 1
-rw-r--r-- drivers/nvme/target/passthru.c | 2
-rw-r--r-- drivers/nvme/target/rdma.c | 12
-rw-r--r-- drivers/nvme/target/tcp.c | 10
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 8
-rw-r--r-- drivers/platform/chrome/Makefile | 3
-rw-r--r-- drivers/platform/chrome/cros_ec_debugfs.c | 12
-rw-r--r-- drivers/platform/chrome/cros_ec_sensorhub_ring.c | 3
-rw-r--r-- drivers/platform/chrome/cros_ec_sensorhub_trace.h | 123
-rw-r--r-- drivers/platform/chrome/cros_ec_trace.h | 95
-rw-r--r-- drivers/platform/chrome/cros_ec_typec.c | 97
-rw-r--r-- drivers/rtc/Kconfig | 10
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/class.c | 12
-rw-r--r-- drivers/rtc/interface.c | 7
-rw-r--r-- drivers/rtc/rtc-ds1307.c | 2
-rw-r--r-- drivers/rtc/rtc-ds1685.c | 16
-rw-r--r-- drivers/rtc/rtc-efi.c | 10
-rw-r--r-- drivers/rtc/rtc-gamecube.c | 1
-rw-r--r-- drivers/rtc/rtc-hym8563.c | 34
-rw-r--r-- drivers/rtc/rtc-m41t80.c | 6
-rw-r--r-- drivers/rtc/rtc-mc146818-lib.c | 22
-rw-r--r-- drivers/rtc/rtc-mpc5121.c | 17
-rw-r--r-- drivers/rtc/rtc-opal.c | 2
-rw-r--r-- drivers/rtc/rtc-optee.c | 362
-rw-r--r-- drivers/rtc/rtc-pcf2123.c | 3
-rw-r--r-- drivers/rtc/rtc-pcf2127.c | 19
-rw-r--r-- drivers/rtc/rtc-pcf85063.c | 3
-rw-r--r-- drivers/rtc/rtc-pcf8523.c | 15
-rw-r--r-- drivers/rtc/rtc-pcf8563.c | 16
-rw-r--r-- drivers/rtc/rtc-pl031.c | 6
-rw-r--r-- drivers/rtc/rtc-pm8xxx.c | 33
-rw-r--r-- drivers/rtc/rtc-spear.c | 25
-rw-r--r-- drivers/rtc/rtc-sun6i.c | 184
-rw-r--r-- drivers/rtc/rtc-wm8350.c | 11
-rw-r--r-- drivers/rtc/rtc-xgene.c | 2
-rw-r--r-- drivers/s390/char/sclp.c | 4
-rw-r--r-- drivers/s390/char/sclp_con.c | 3
-rw-r--r-- drivers/s390/char/sclp_vt220.c | 6
-rw-r--r-- drivers/s390/char/tape_34xx.c | 4
-rw-r--r-- drivers/s390/cio/device_fsm.c | 12
-rw-r--r-- drivers/s390/cio/eadm_sch.c | 12
-rw-r--r-- drivers/s390/crypto/ap_bus.h | 1
-rw-r--r-- drivers/s390/crypto/pkey_api.c | 2
-rw-r--r-- drivers/s390/crypto/vfio_ap_ops.c | 24
-rw-r--r-- drivers/s390/crypto/zcrypt_api.c | 68
-rw-r--r-- drivers/s390/crypto/zcrypt_card.c | 2
-rw-r--r-- drivers/s390/crypto/zcrypt_ep11misc.c | 2
-rw-r--r-- fs/aio.c | 1
-rw-r--r-- fs/btrfs/inode.c | 2
-rw-r--r-- fs/btrfs/reflink.c | 4
-rw-r--r-- fs/buffer.c | 3
-rw-r--r-- fs/ceph/file.c | 2
-rw-r--r-- fs/cifs/cifs_debug.c | 2
-rw-r--r-- fs/cifs/cifsfs.c | 4
-rw-r--r-- fs/cifs/cifsglob.h | 24
-rw-r--r-- fs/cifs/cifspdu.h | 14
-rw-r--r-- fs/cifs/cifssmb.c | 13
-rw-r--r-- fs/cifs/connect.c | 46
-rw-r--r-- fs/cifs/file.c | 8
-rw-r--r-- fs/cifs/inode.c | 2
-rw-r--r-- fs/cifs/misc.c | 2
-rw-r--r-- fs/cifs/smb2glob.h | 11
-rw-r--r-- fs/cifs/smb2misc.c | 14
-rw-r--r-- fs/cifs/smb2ops.c | 246
-rw-r--r-- fs/cifs/smb2pdu.c | 73
-rw-r--r-- fs/cifs/smb2pdu.h | 560
-rw-r--r-- fs/cifs/smb2proto.h | 2
-rw-r--r-- fs/crypto/crypto.c | 2
-rw-r--r-- fs/exfat/exfat_fs.h | 3
-rw-r--r-- fs/exfat/file.c | 2
-rw-r--r-- fs/exfat/namei.c | 55
-rw-r--r-- fs/exfat/super.c | 17
-rw-r--r-- fs/ext4/file.c | 2
-rw-r--r-- fs/ext4/inode.c | 2
-rw-r--r-- fs/ext4/readpage.c | 2
-rw-r--r-- fs/f2fs/checkpoint.c | 2
-rw-r--r-- fs/f2fs/data.c | 6
-rw-r--r-- fs/f2fs/file.c | 2
-rw-r--r-- fs/f2fs/node.c | 4
-rw-r--r-- fs/fuse/fuse_i.h | 2
-rw-r--r-- fs/internal.h | 2
-rw-r--r-- fs/io_uring.c | 133
-rw-r--r-- fs/ioctl.c | 2
-rw-r--r-- fs/iomap/buffered-io.c | 9
-rw-r--r-- fs/kernfs/file.c | 7
-rw-r--r-- fs/ksmbd/oplock.c | 4
-rw-r--r-- fs/ksmbd/server.c | 2
-rw-r--r-- fs/ksmbd/smb2pdu.c | 131
-rw-r--r-- fs/ksmbd/smb2pdu.h | 533
-rw-r--r-- fs/ksmbd/transport_tcp.c | 4
-rw-r--r-- fs/namespace.c | 23
-rw-r--r-- fs/nfs/file.c | 2
-rw-r--r-- fs/nilfs2/btnode.c | 23
-rw-r--r-- fs/nilfs2/btnode.h | 1
-rw-r--r-- fs/nilfs2/btree.c | 27
-rw-r--r-- fs/nilfs2/dat.c | 4
-rw-r--r-- fs/nilfs2/gcinode.c | 7
-rw-r--r-- fs/nilfs2/inode.c | 159
-rw-r--r-- fs/nilfs2/mdt.c | 43
-rw-r--r-- fs/nilfs2/mdt.h | 6
-rw-r--r-- fs/nilfs2/nilfs.h | 16
-rw-r--r-- fs/nilfs2/page.c | 16
-rw-r--r-- fs/nilfs2/page.h | 1
-rw-r--r-- fs/nilfs2/segment.c | 9
-rw-r--r-- fs/nilfs2/super.c | 5
-rw-r--r-- fs/ntfs/aops.c | 2
-rw-r--r-- fs/ocfs2/quota_global.c | 23
-rw-r--r-- fs/ocfs2/quota_local.c | 2
-rw-r--r-- fs/read_write.c | 1
-rw-r--r-- fs/seq_file.c | 4
-rw-r--r-- fs/smbfs_common/smb2pdu.h | 639
-rw-r--r-- fs/verity/verify.c | 4
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.c | 28
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.h | 1
-rw-r--r-- fs/xfs/xfs_bio_io.c | 33
-rw-r--r-- fs/xfs/xfs_fsops.c | 60
-rw-r--r-- fs/xfs/xfs_icache.c | 2
-rw-r--r-- fs/xfs/xfs_inode.c | 2
-rw-r--r-- fs/xfs/xfs_inode_item.c | 162
-rw-r--r-- fs/xfs/xfs_inode_item.h | 1
-rw-r--r-- fs/xfs/xfs_linux.h | 2
-rw-r--r-- fs/xfs/xfs_log.c | 109
-rw-r--r-- fs/xfs/xfs_log_cil.c | 46
-rw-r--r-- fs/xfs/xfs_log_priv.h | 14
-rw-r--r-- fs/xfs/xfs_log_recover.c | 56
-rw-r--r-- fs/xfs/xfs_mount.c | 3
-rw-r--r-- fs/xfs/xfs_mount.h | 15
-rw-r--r-- fs/xfs/xfs_super.c | 3
-rw-r--r-- fs/xfs/xfs_trans.c | 48
-rw-r--r-- fs/xfs/xfs_trans_ail.c | 8
-rw-r--r-- include/dt-bindings/clock/sun6i-rtc.h | 10
-rw-r--r-- include/linux/blk-cgroup.h | 5
-rw-r--r-- include/linux/blk_types.h | 2
-rw-r--r-- include/linux/clk/sunxi-ng.h | 2
-rw-r--r-- include/linux/fs.h | 31
-rw-r--r-- include/linux/fsverity.h | 2
-rw-r--r-- include/linux/gfp.h | 4
-rw-r--r-- include/linux/gpio/driver.h | 13
-rw-r--r-- include/linux/input.h | 2
-rw-r--r-- include/linux/input/vivaldi-fmap.h | 27
-rw-r--r-- include/linux/kvm_host.h | 60
-rw-r--r-- include/linux/kvm_types.h | 11
-rw-r--r-- include/linux/mc146818rtc.h | 2
-rw-r--r-- include/linux/net.h | 19
-rw-r--r-- include/linux/nvme.h | 1
-rw-r--r-- include/linux/pagemap.h | 2
-rw-r--r-- include/linux/rtc.h | 2
-rw-r--r-- include/linux/rtc/ds1685.h | 1
-rw-r--r-- include/linux/sbitmap.h | 2
-rw-r--r-- include/linux/seq_file.h | 1
-rw-r--r-- include/linux/user_events.h (renamed from include/uapi/linux/user_events.h) | 0
-rw-r--r-- include/linux/xarray.h | 1
-rw-r--r-- include/sound/pcm.h | 1
-rw-r--r-- include/uapi/linux/io_uring.h | 2
-rw-r--r-- include/uapi/linux/loop.h | 4
-rw-r--r-- include/uapi/linux/rtc.h | 3
-rw-r--r-- kernel/trace/Kconfig | 1
-rw-r--r-- kernel/trace/trace_events_user.c | 5
-rw-r--r-- kernel/watch_queue.c | 1
-rw-r--r-- lib/sbitmap.c | 2
-rw-r--r-- lib/test_xarray.c | 22
-rw-r--r-- lib/xarray.c | 4
-rw-r--r-- mm/damon/core.c | 5
-rw-r--r-- mm/filemap.c | 12
-rw-r--r-- mm/gup.c | 10
-rw-r--r-- mm/internal.h | 6
-rw-r--r-- mm/kfence/core.c | 11
-rw-r--r-- mm/kfence/kfence.h | 3
-rw-r--r-- mm/kmemleak.c | 9
-rw-r--r-- mm/madvise.c | 9
-rw-r--r-- mm/memory.c | 12
-rw-r--r-- mm/migrate.c | 2
-rw-r--r-- mm/mlock.c | 46
-rw-r--r-- mm/page_alloc.c | 1
-rw-r--r-- mm/readahead.c | 204
-rw-r--r-- mm/rmap.c | 4
-rw-r--r-- mm/swap.c | 4
-rw-r--r-- sound/core/pcm.c | 1
-rw-r--r-- sound/core/pcm_lib.c | 9
-rw-r--r-- sound/core/pcm_native.c | 39
-rw-r--r-- sound/isa/cs423x/cs4236.c | 8
-rw-r--r-- sound/pci/hda/patch_cs8409-tables.c | 68
-rw-r--r-- sound/pci/hda/patch_cs8409.c | 47
-rw-r--r-- sound/pci/hda/patch_cs8409.h | 5
-rw-r--r-- sound/pci/hda/patch_hdmi.c | 8
-rw-r--r-- sound/pci/hda/patch_realtek.c | 14
-rw-r--r-- sound/soc/codecs/mt6358.c | 4
-rw-r--r-- sound/soc/fsl/fsl-asoc-card.c | 12
-rw-r--r-- sound/soc/rockchip/rockchip_i2s_tdm.c | 10
-rw-r--r-- sound/soc/sof/intel/Kconfig | 1
-rw-r--r-- tools/vm/page_owner_sort.c | 6
-rw-r--r-- virt/kvm/kvm_main.c | 22
-rw-r--r-- virt/kvm/pfncache.c | 72
417 files changed, 8145 insertions, 4430 deletions
diff --git a/.mailmap b/.mailmap
index 8fd9b3c7a42b..b9d358217586 100644
--- a/.mailmap
+++ b/.mailmap
@@ -213,6 +213,7 @@ Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
+Kirill Tkhai <kirill.tkhai@openvz.org> <ktkhai@virtuozzo.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index a137a0e6d068..77e0ece2b1d6 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -315,11 +315,15 @@ indeed the normal API is implemented in terms of the advanced API. The
advanced API is only available to modules with a GPL-compatible license.
The advanced API is based around the xa_state. This is an opaque data
-structure which you declare on the stack using the XA_STATE()
-macro. This macro initialises the xa_state ready to start walking
-around the XArray. It is used as a cursor to maintain the position
-in the XArray and let you compose various operations together without
-having to restart from the top every time.
+structure which you declare on the stack using the XA_STATE() macro.
+This macro initialises the xa_state ready to start walking around the
+XArray. It is used as a cursor to maintain the position in the XArray
+and let you compose various operations together without having to restart
+from the top every time. The contents of the xa_state are protected by
+the rcu_read_lock() or the xas_lock(). If you need to drop whichever of
+those locks is protecting your state and tree, you must call xas_pause()
+so that future calls do not rely on the parts of the state which were
+left unprotected.
The xa_state is also used to store errors. You can call
xas_error() to retrieve the error. All operations check whether
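As a minimal sketch of the locking rule described in this hunk (the XArray
``xa``, the entry type ``struct foo``, and the ``process()`` helper are
assumed for illustration), an advanced-API walk that drops the RCU lock
mid-iteration must call xas_pause() first::

    XA_STATE(xas, &xa, 0);
    struct foo *entry;

    rcu_read_lock();
    xas_for_each(&xas, entry, ULONG_MAX) {
            process(entry);
            if (need_resched()) {
                    /* The xa_state is about to lose its protection. */
                    xas_pause(&xas);
                    rcu_read_unlock();
                    cond_resched();
                    rcu_read_lock();
            }
    }
    rcu_read_unlock();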
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
index 6ce0b212ec6d..606b4b1b709d 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
@@ -81,4 +81,4 @@ Example:
};
};
-[1]. Documentation/devicetree/bindings/arm/idle-states.yaml
+[1]. Documentation/devicetree/bindings/cpu/idle-states.yaml
diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml
index 8b77cf83a095..dd83ef278af0 100644
--- a/Documentation/devicetree/bindings/arm/psci.yaml
+++ b/Documentation/devicetree/bindings/arm/psci.yaml
@@ -101,7 +101,7 @@ properties:
bindings in [1]) must specify this property.
[1] Kernel documentation - ARM idle states bindings
- Documentation/devicetree/bindings/arm/idle-states.yaml
+ Documentation/devicetree/bindings/cpu/idle-states.yaml
patternProperties:
"^power-domain-":
diff --git a/Documentation/devicetree/bindings/arm/idle-states.yaml b/Documentation/devicetree/bindings/cpu/idle-states.yaml
index 4d381fa1ee57..fa4d4142ac93 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.yaml
+++ b/Documentation/devicetree/bindings/cpu/idle-states.yaml
@@ -1,25 +1,30 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/idle-states.yaml#
+$id: http://devicetree.org/schemas/cpu/idle-states.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: ARM idle states binding description
+title: Idle states binding description
maintainers:
- Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ - Anup Patel <anup@brainfault.org>
description: |+
==========================================
1 - Introduction
==========================================
- ARM systems contain HW capable of managing power consumption dynamically,
- where cores can be put in different low-power states (ranging from simple wfi
- to power gating) according to OS PM policies. The CPU states representing the
- range of dynamic idle states that a processor can enter at run-time, can be
- specified through device tree bindings representing the parameters required to
- enter/exit specific idle states on a given processor.
+ ARM and RISC-V systems contain HW capable of managing power consumption
+ dynamically, where cores can be put in different low-power states (ranging
+ from simple wfi to power gating) according to OS PM policies. The CPU states
+ representing the range of dynamic idle states that a processor can enter at
+ run-time can be specified through device tree bindings representing the
+ parameters required to enter/exit specific idle states on a given processor.
+
+ ==========================================
+ 2 - ARM idle states
+ ==========================================
According to the Server Base System Architecture document (SBSA, [3]), the
power states an ARM CPU can be put into are identified by the following list:
@@ -43,8 +48,23 @@ description: |+
The device tree binding definition for ARM idle states is the subject of this
document.
+ ==========================================
+ 3 - RISC-V idle states
+ ==========================================
+
+ On RISC-V systems, the HARTs (or CPUs) [6] can be put in platform-specific
+ suspend (or idle) states (ranging from simple WFI to power gating). The
+ RISC-V SBI v0.3 (or higher) [7] hart state management extension provides a
+ standard mechanism for the OS to request HART state transitions.
+
+ The platform-specific suspend (or idle) states of a hart can be either
+ retentive or non-retentive in nature. A retentive suspend state will
+ preserve HART registers and CSR values for all privilege modes, whereas
+ a non-retentive suspend state will not preserve HART registers and CSR
+ values.
+
===========================================
- 2 - idle-states definitions
+ 4 - idle-states definitions
===========================================
Idle states are characterized for a specific system through a set of
@@ -211,10 +231,10 @@ description: |+
properties specification that is the subject of the following sections.
===========================================
- 3 - idle-states node
+ 5 - idle-states node
===========================================
- ARM processor idle states are defined within the idle-states node, which is
+ The processor idle states are defined within the idle-states node, which is
a direct child of the cpus node [1] and provides a container where the
processor idle states, defined as device tree nodes, are listed.
@@ -223,7 +243,7 @@ description: |+
just supports idle_standby, an idle-states node is not required.
===========================================
- 4 - References
+ 6 - References
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
@@ -238,9 +258,15 @@ description: |+
[4] ARM Architecture Reference Manuals
http://infocenter.arm.com/help/index.jsp
- [6] ARM Linux Kernel documentation - Booting AArch64 Linux
+ [5] ARM Linux Kernel documentation - Booting AArch64 Linux
Documentation/arm64/booting.rst
+ [6] RISC-V Linux Kernel documentation - CPUs bindings
+ Documentation/devicetree/bindings/riscv/cpus.yaml
+
+ [7] RISC-V Supervisor Binary Interface (SBI)
+ http://github.com/riscv/riscv-sbi-doc/riscv-sbi.adoc
+
properties:
$nodename:
const: idle-states
@@ -253,7 +279,7 @@ properties:
On ARM 32-bit systems this property is optional
This assumes that the "enable-method" property is set to "psci" in the cpu
- node[6] that is responsible for setting up CPU idle management in the OS
+ node[5] that is responsible for setting up CPU idle management in the OS
implementation.
const: psci
@@ -265,8 +291,8 @@ patternProperties:
as follows.
The idle state entered by executing the wfi instruction (idle_standby
- SBSA,[3][4]) is considered standard on all ARM platforms and therefore
- must not be listed.
+ SBSA,[3][4]) is considered standard on all ARM and RISC-V platforms and
+ therefore must not be listed.
In addition to the properties listed above, a state node may require
additional properties specific to the entry-method defined in the
@@ -275,7 +301,27 @@ patternProperties:
properties:
compatible:
- const: arm,idle-state
+ enum:
+ - arm,idle-state
+ - riscv,idle-state
+
+ arm,psci-suspend-param:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ power_state parameter to pass to the ARM PSCI suspend call.
+
+ Device tree nodes that require usage of PSCI CPU_SUSPEND function
+ (i.e. idle states node with entry-method property is set to "psci")
+ must specify this property.
+
+ riscv,sbi-suspend-param:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ suspend_type parameter to pass to the RISC-V SBI HSM suspend call.
+
+ This property is required in idle state nodes of device tree meant
+ for RISC-V systems. For more details on the suspend_type parameter,
+ refer to the SBI specification v0.3 (or higher) [7].
local-timer-stop:
description:
@@ -317,6 +363,8 @@ patternProperties:
description:
A string used as a descriptive name for the idle state.
+ additionalProperties: false
+
required:
- compatible
- entry-latency-us
@@ -658,4 +706,150 @@ examples:
};
};
+ - |
+ // Example 3 (RISC-V 64-bit, 4-cpu systems, two clusters):
+
+ cpus {
+ #size-cells = <0>;
+ #address-cells = <1>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "riscv";
+ reg = <0x0>;
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,sv48";
+ cpu-idle-states = <&CPU_RET_0_0>, <&CPU_NONRET_0_0>,
+ <&CLUSTER_RET_0>, <&CLUSTER_NONRET_0>;
+
+ cpu_intc0: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "riscv";
+ reg = <0x1>;
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,sv48";
+ cpu-idle-states = <&CPU_RET_0_0>, <&CPU_NONRET_0_0>,
+ <&CLUSTER_RET_0>, <&CLUSTER_NONRET_0>;
+
+ cpu_intc1: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ cpu@10 {
+ device_type = "cpu";
+ compatible = "riscv";
+ reg = <0x10>;
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,sv48";
+ cpu-idle-states = <&CPU_RET_1_0>, <&CPU_NONRET_1_0>,
+ <&CLUSTER_RET_1>, <&CLUSTER_NONRET_1>;
+
+ cpu_intc10: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ cpu@11 {
+ device_type = "cpu";
+ compatible = "riscv";
+ reg = <0x11>;
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,sv48";
+ cpu-idle-states = <&CPU_RET_1_0>, <&CPU_NONRET_1_0>,
+ <&CLUSTER_RET_1>, <&CLUSTER_NONRET_1>;
+
+ cpu_intc11: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ idle-states {
+ CPU_RET_0_0: cpu-retentive-0-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x10000000>;
+ entry-latency-us = <20>;
+ exit-latency-us = <40>;
+ min-residency-us = <80>;
+ };
+
+ CPU_NONRET_0_0: cpu-nonretentive-0-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x90000000>;
+ entry-latency-us = <250>;
+ exit-latency-us = <500>;
+ min-residency-us = <950>;
+ };
+
+ CLUSTER_RET_0: cluster-retentive-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x11000000>;
+ local-timer-stop;
+ entry-latency-us = <50>;
+ exit-latency-us = <100>;
+ min-residency-us = <250>;
+ wakeup-latency-us = <130>;
+ };
+
+ CLUSTER_NONRET_0: cluster-nonretentive-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x91000000>;
+ local-timer-stop;
+ entry-latency-us = <600>;
+ exit-latency-us = <1100>;
+ min-residency-us = <2700>;
+ wakeup-latency-us = <1500>;
+ };
+
+ CPU_RET_1_0: cpu-retentive-1-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x10000010>;
+ entry-latency-us = <20>;
+ exit-latency-us = <40>;
+ min-residency-us = <80>;
+ };
+
+ CPU_NONRET_1_0: cpu-nonretentive-1-0 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x90000010>;
+ entry-latency-us = <250>;
+ exit-latency-us = <500>;
+ min-residency-us = <950>;
+ };
+
+ CLUSTER_RET_1: cluster-retentive-1 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x11000010>;
+ local-timer-stop;
+ entry-latency-us = <50>;
+ exit-latency-us = <100>;
+ min-residency-us = <250>;
+ wakeup-latency-us = <130>;
+ };
+
+ CLUSTER_NONRET_1: cluster-nonretentive-1 {
+ compatible = "riscv,idle-state";
+ riscv,sbi-suspend-param = <0x91000010>;
+ local-timer-stop;
+ entry-latency-us = <600>;
+ exit-latency-us = <1100>;
+ min-residency-us = <2700>;
+ wakeup-latency-us = <1500>;
+ };
+ };
+ };
+
...
diff --git a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
new file mode 100644
index 000000000000..b1770640f94b
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/mediatek,mt6779-keypad.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek's Keypad Controller device tree bindings
+
+maintainers:
+ - Fengping Yu <fengping.yu@mediatek.com>
+
+allOf:
+ - $ref: "/schemas/input/matrix-keymap.yaml#"
+
+description: |
+ Mediatek's Keypad controller is used to interface a SoC with a matrix-type
+ keypad device. The keypad controller supports multiple row and column lines.
+ A key can be placed at each intersection of a unique row and a unique column.
+ The keypad controller can sense a key-press and key-release and report the
+ event using an interrupt to the CPU.
+
+properties:
+ compatible:
+ oneOf:
+ - const: mediatek,mt6779-keypad
+ - items:
+ - enum:
+ - mediatek,mt6873-keypad
+ - const: mediatek,mt6779-keypad
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: kpd
+
+ wakeup-source:
+ description: use any event on keypad as wakeup event
+ type: boolean
+
+ debounce-delay-ms:
+ maximum: 256
+ default: 16
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ keyboard@10010000 {
+ compatible = "mediatek,mt6779-keypad";
+ reg = <0 0x10010000 0 0x1000>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_FALLING>;
+ clocks = <&clk26m>;
+ clock-names = "kpd";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt b/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
index 535d92885372..9d00f2a8e13a 100644
--- a/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
+++ b/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
@@ -9,7 +9,10 @@ For MT6397/MT6323 MFD bindings see:
Documentation/devicetree/bindings/mfd/mt6397.txt
Required properties:
-- compatible: "mediatek,mt6397-keys" or "mediatek,mt6323-keys"
+- compatible: Should be one of:
+ - "mediatek,mt6397-keys"
+ - "mediatek,mt6323-keys"
+ - "mediatek,mt6358-keys"
- linux,keycodes: See Documentation/devicetree/bindings/input/input.yaml
Optional Properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
new file mode 100644
index 000000000000..e3a2b871e50c
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/imagis,ist3038c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Imagis IST30XXC family touchscreen controller bindings
+
+maintainers:
+ - Markuss Broks <markuss.broks@gmail.com>
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+properties:
+ $nodename:
+ pattern: "^touchscreen@[0-9a-f]+$"
+
+ compatible:
+ enum:
+ - imagis,ist3038c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply:
+ description: Power supply regulator for the chip
+
+ vddio-supply:
+ description: Power supply regulator for the I2C bus
+
+ touchscreen-size-x: true
+ touchscreen-size-y: true
+ touchscreen-fuzz-x: true
+ touchscreen-fuzz-y: true
+ touchscreen-inverted-x: true
+ touchscreen-inverted-y: true
+ touchscreen-swapped-x-y: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - touchscreen-size-x
+ - touchscreen-size-y
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ touchscreen@50 {
+ compatible = "imagis,ist3038c";
+ reg = <0x50>;
+ interrupt-parent = <&gpio>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&ldo1_reg>;
+ vddio-supply = <&ldo2_reg>;
+ touchscreen-size-x = <720>;
+ touchscreen-size-y = <1280>;
+ touchscreen-fuzz-x = <10>;
+ touchscreen-fuzz-y = <10>;
+ touchscreen-inverted-x;
+ touchscreen-inverted-y;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index aa5fb64d57eb..d632ac76532e 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -99,6 +99,14 @@ properties:
- compatible
- interrupt-controller
+ cpu-idle-states:
+ $ref: '/schemas/types.yaml#/definitions/phandle-array'
+ items:
+ maxItems: 1
+ description: |
+ List of phandles to idle state nodes supported
+ by this hart (see ../cpu/idle-states.yaml).
+
required:
- riscv,isa
- interrupt-controller
diff --git a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
index beeb90e55727..0b767fec39d8 100644
--- a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
@@ -16,16 +16,22 @@ properties:
compatible:
oneOf:
- - const: allwinner,sun6i-a31-rtc
- - const: allwinner,sun8i-a23-rtc
- - const: allwinner,sun8i-h3-rtc
- - const: allwinner,sun8i-r40-rtc
- - const: allwinner,sun8i-v3-rtc
- - const: allwinner,sun50i-h5-rtc
+ - enum:
+ - allwinner,sun6i-a31-rtc
+ - allwinner,sun8i-a23-rtc
+ - allwinner,sun8i-h3-rtc
+ - allwinner,sun8i-r40-rtc
+ - allwinner,sun8i-v3-rtc
+ - allwinner,sun50i-h5-rtc
+ - allwinner,sun50i-h6-rtc
+ - allwinner,sun50i-h616-rtc
+ - allwinner,sun50i-r329-rtc
- items:
- const: allwinner,sun50i-a64-rtc
- const: allwinner,sun8i-h3-rtc
- - const: allwinner,sun50i-h6-rtc
+ - items:
+ - const: allwinner,sun20i-d1-rtc
+ - const: allwinner,sun50i-r329-rtc
reg:
maxItems: 1
@@ -37,7 +43,12 @@ properties:
- description: RTC Alarm 1
clocks:
- maxItems: 1
+ minItems: 1
+ maxItems: 4
+
+ clock-names:
+ minItems: 1
+ maxItems: 4
clock-output-names:
minItems: 1
@@ -85,6 +96,7 @@ allOf:
enum:
- allwinner,sun8i-h3-rtc
- allwinner,sun50i-h5-rtc
+ - allwinner,sun50i-h6-rtc
then:
properties:
@@ -96,19 +108,68 @@ allOf:
properties:
compatible:
contains:
- const: allwinner,sun50i-h6-rtc
+ const: allwinner,sun50i-h616-rtc
then:
properties:
- clock-output-names:
+ clocks:
minItems: 3
maxItems: 3
+ items:
+ - description: Bus clock for register access
+ - description: 24 MHz oscillator
+ - description: 32 kHz clock from the CCU
+
+ clock-names:
+ minItems: 3
+ maxItems: 3
+ items:
+ - const: bus
+ - const: hosc
+ - const: pll-32k
+
+ required:
+ - clocks
+ - clock-names
- if:
properties:
compatible:
contains:
- const: allwinner,sun8i-r40-rtc
+ const: allwinner,sun50i-r329-rtc
+
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 4
+ items:
+ - description: Bus clock for register access
+ - description: 24 MHz oscillator
+ - description: AHB parent for internal SPI clock
+ - description: External 32768 Hz oscillator
+
+ clock-names:
+ minItems: 3
+ maxItems: 4
+ items:
+ - const: bus
+ - const: hosc
+ - const: ahb
+ - const: ext-osc32k
+
+ required:
+ - clocks
+ - clock-names
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - allwinner,sun8i-r40-rtc
+ - allwinner,sun50i-h616-rtc
+ - allwinner,sun50i-r329-rtc
then:
properties:
@@ -127,7 +188,6 @@ required:
- compatible
- reg
- interrupts
- - clock-output-names
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt b/Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt
deleted file mode 100644
index 3f0e2a5950eb..000000000000
--- a/Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Atmel AT91SAM9260 Real Time Timer
-
-Required properties:
-- compatible: should be one of the following:
- - "atmel,at91sam9260-rtt"
- - "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"
-- reg: should encode the memory region of the RTT controller
-- interrupts: rtt alarm/event interrupt
-- clocks: should contain the 32 KHz slow clk that will drive the RTT block.
-- atmel,rtt-rtc-time-reg: should encode the GPBR register used to store
- the time base when the RTT is used as an RTC.
- The first cell should point to the GPBR node and the second one
- encode the offset within the GPBR block (or in other words, the
- GPBR register used to store the time base).
-
-
-Example:
-
-rtt@fffffd20 {
- compatible = "atmel,at91sam9260-rtt";
- reg = <0xfffffd20 0x10>;
- interrupts = <1 4 7>;
- clocks = <&clk32k>;
- atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml b/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml
new file mode 100644
index 000000000000..0ef1b7ff4a77
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/atmel,at91sam9260-rtt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel AT91 RTT Device Tree Bindings
+
+allOf:
+ - $ref: "rtc.yaml#"
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: atmel,at91sam9260-rtt
+ - items:
+ - const: microchip,sam9x60-rtt
+ - const: atmel,at91sam9260-rtt
+ - items:
+ - const: microchip,sama7g5-rtt
+ - const: microchip,sam9x60-rtt
+ - const: atmel,at91sam9260-rtt
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ atmel,rtt-rtc-time-reg:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: Phandle to the GPBR node.
+ - description: Offset within the GPBR block.
+ description:
+ Should encode the GPBR register used to store the time base when the
+ RTT is used as an RTC. The first cell should point to the GPBR node
+ and the second one encodes the offset within the GPBR block (or in
+ other words, the GPBR register used to store the time base).
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - atmel,rtt-rtc-time-reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&clk32k>;
+ atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 8fe2d934f949..01430973ecec 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -560,6 +560,8 @@ patternProperties:
description: Ingenieurburo Fur Ic-Technologie (I/F/I)
"^ilitek,.*":
description: ILI Technology Corporation (ILITEK)
+ "^imagis,.*":
+ description: Imagis Technologies Co., Ltd.
"^img,.*":
description: Imagination Technologies Ltd.
"^imi,.*":
diff --git a/Documentation/filesystems/cifs/ksmbd.rst b/Documentation/filesystems/cifs/ksmbd.rst
index b0d354fd8066..1af600db2e70 100644
--- a/Documentation/filesystems/cifs/ksmbd.rst
+++ b/Documentation/filesystems/cifs/ksmbd.rst
@@ -82,10 +82,10 @@ Signing Update Supported.
Pre-authentication integrity Supported.
SMB3 encryption(CCM, GCM) Supported. (CCM and GCM128 supported, GCM256 in
progress)
-SMB direct(RDMA) Partially Supported. SMB3 Multi-channel is
- required to connect to Windows client.
+SMB direct(RDMA) Supported.
SMB3 Multi-channel Partially Supported. Planned to implement
replay/retry mechanisms for future.
+Receive Side Scaling mode Supported.
SMB3.1.1 POSIX extension Supported.
ACLs Partially Supported. only DACLs available, SACLs
(auditing) is planned for the future. For
diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst
index 1d831e3cbcb3..8cc536d08f51 100644
--- a/Documentation/filesystems/fsverity.rst
+++ b/Documentation/filesystems/fsverity.rst
@@ -549,7 +549,7 @@ Pagecache
~~~~~~~~~
For filesystems using Linux's pagecache, the ``->readpage()`` and
-``->readpages()`` methods must be modified to verify pages before they
+``->readahead()`` methods must be modified to verify pages before they
are marked Uptodate. Merely hooking ``->read_iter()`` would be
insufficient, since ``->read_iter()`` is not used for memory maps.
@@ -611,7 +611,7 @@ workqueue, and then the workqueue work does the decryption or
verification. Finally, pages where no decryption or verity error
occurred are marked Uptodate, and the pages are unlocked.
-Files on ext4 and f2fs may contain holes. Normally, ``->readpages()``
+Files on ext4 and f2fs may contain holes. Normally, ``->readahead()``
simply zeroes holes and sets the corresponding pages Uptodate; no bios
are issued. To prevent this case from bypassing fs-verity, these
filesystems use fsverity_verify_page() to verify hole pages.
@@ -778,7 +778,7 @@ weren't already directly answered in other parts of this document.
- To prevent bypassing verification, pages must not be marked
Uptodate until they've been verified. Currently, each
filesystem is responsible for marking pages Uptodate via
- ``->readpages()``. Therefore, currently it's not possible for
+ ``->readahead()``. Therefore, currently it's not possible for
the VFS to do the verification on its own. Changing this would
require significant changes to the VFS and all filesystems.
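A hypothetical sketch of the pattern this document describes (the function
name and surrounding glue are illustrative, not the actual ext4/f2fs code):
a read completion path reached from ``->readahead()`` must verify each page
before it may be marked Uptodate::

    /* Hypothetical completion handler, run after decryption/verification
     * has been handed off to a workqueue as described above. */
    static void my_verify_and_finish(struct bio *bio)
    {
            struct bio_vec *bv;
            struct bvec_iter_all iter_all;

            bio_for_each_segment_all(bv, bio, iter_all) {
                    struct page *page = bv->bv_page;

                    if (fsverity_active(page->mapping->host) &&
                        !fsverity_verify_page(page))
                            SetPageError(page);     /* never Uptodate */
                    else
                            SetPageUptodate(page);
                    unlock_page(page);
            }
            bio_put(bio);
    }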
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index 2998cec9af4b..c26d854275a0 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -241,8 +241,6 @@ prototypes::
int (*writepages)(struct address_space *, struct writeback_control *);
bool (*dirty_folio)(struct address_space *, struct folio *folio);
void (*readahead)(struct readahead_control *);
- int (*readpages)(struct file *filp, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages);
int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
@@ -274,7 +272,6 @@ readpage: yes, unlocks shared
writepages:
dirty_folio maybe
readahead: yes, unlocks shared
-readpages: no shared
write_begin: locks the page exclusive
write_end: yes, unlocks exclusive
bmap:
@@ -300,9 +297,6 @@ completion.
->readahead() unlocks the pages that I/O is attempted on like ->readpage().
-->readpages() populates the pagecache with the passed pages and starts
-I/O against them. They come unlocked upon I/O completion.
-
->writepage() is used for two purposes: for "memory cleansing" and for
"sync". These are quite different operations and the behaviour may differ
depending upon the mode.
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 4f14edf93941..794bd1a66bfb 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -726,8 +726,6 @@ cache in your filesystem. The following members are defined:
int (*writepages)(struct address_space *, struct writeback_control *);
bool (*dirty_folio)(struct address_space *, struct folio *);
void (*readahead)(struct readahead_control *);
- int (*readpages)(struct file *filp, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages);
int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
@@ -817,15 +815,6 @@ cache in your filesystem. The following members are defined:
completes successfully. Setting PageError on any page will be
ignored; simply unlock the page if an I/O error occurs.
-``readpages``
- called by the VM to read pages associated with the address_space
- object. This is essentially just a vector version of readpage.
- Instead of just one page, several pages are requested.
- readpages is only used for read-ahead, so read errors are
- ignored. If anything goes wrong, feel free to give up.
- This interface is deprecated and will be removed by the end of
- 2020; implement readahead instead.
-
``write_begin``
Called by the generic buffered write code to ask the filesystem
to prepare to write len bytes at the given offset in the file.
diff --git a/Documentation/riscv/index.rst b/Documentation/riscv/index.rst
index ea915c196048..e23b876ad6eb 100644
--- a/Documentation/riscv/index.rst
+++ b/Documentation/riscv/index.rst
@@ -7,7 +7,6 @@ RISC-V architecture
boot-image-header
vm-layout
- pmu
patch-acceptance
features
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 07a45474abe9..d13fa6600467 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -151,12 +151,6 @@ In order to create user controlled virtual machines on S390, check
KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
privileged user (CAP_SYS_ADMIN).
-To use hardware assisted virtualization on MIPS (VZ ASE) rather than
-the default trap & emulate implementation (which changes the virtual
-memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
-flag KVM_VM_MIPS_VZ.
-
-
On arm64, the physical address size for a VM (IPA Size limit) is limited
to 40bits by default. The limit can be configured if the host supports the
extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use
@@ -4081,6 +4075,11 @@ x2APIC MSRs are always allowed, independent of the ``default_allow`` setting,
and their behavior depends on the ``X2APIC_ENABLE`` bit of the APIC base
register.
+.. warning::
+ MSR accesses coming from nested vmentry/vmexit are not filtered.
+ This includes both writes to individual VMCS fields and reads/writes
+ through the MSR lists pointed to by the VMCS.
+
If a bit is within one of the defined ranges, read and write accesses are
guarded by the bitmap's value for the MSR index if the kind of access
is included in the ``struct kvm_msr_filter_range`` flags. If no range
@@ -5293,6 +5292,10 @@ type values:
KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
Sets the guest physical address of the vcpu_info for a given vCPU.
+ As with the shared_info page for the VM, the corresponding page may be
+ dirtied at any time if event channel interrupt delivery is enabled, so
+ userspace should always assume that the page is dirty without relying
+ on dirty logging.
KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
Sets the guest physical address of an additional pvclock structure
@@ -7719,3 +7722,49 @@ only be invoked on a VM prior to the creation of VCPUs.
At this time, KVM_PMU_CAP_DISABLE is the only capability. Setting
this capability will disable PMU virtualization for that VM. Usermode
should adjust CPUID leaf 0xA to reflect that the PMU is disabled.
+
+9. Known KVM API problems
+=========================
+
+In some cases, KVM's API has some inconsistencies or common pitfalls
+that userspace needs to be aware of. This section details some of
+these issues.
+
+Most of them are architecture specific, so the section is split by
+architecture.
+
+9.1. x86
+--------
+
+``KVM_GET_SUPPORTED_CPUID`` issues
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In general, ``KVM_GET_SUPPORTED_CPUID`` is designed so that it is possible
+to take its result and pass it directly to ``KVM_SET_CPUID2``. This section
+documents some cases in which that requires some care.
+
+Local APIC features
+~~~~~~~~~~~~~~~~~~~
+
+CPU[EAX=1]:ECX[21] (X2APIC) is reported by ``KVM_GET_SUPPORTED_CPUID``,
+but it can only be enabled if ``KVM_CREATE_IRQCHIP`` or
+``KVM_ENABLE_CAP(KVM_CAP_IRQCHIP_SPLIT)`` is used to enable in-kernel emulation of
+the local APIC.
+
+The same is true for the ``KVM_FEATURE_PV_UNHALT`` paravirtualized feature.
+
+CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``.
+It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
+has enabled in-kernel emulation of the local APIC.
+
+Obsolete ioctls and capabilities
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+KVM_CAP_DISABLE_QUIRKS does not let userspace know which quirks are actually
+available. Use ``KVM_CHECK_EXTENSION(KVM_CAP_DISABLE_QUIRKS2)`` instead if
+available.
+
+Ordering of KVM_GET_*/KVM_SET_* ioctls
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+TBD
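For reference, the intended ``KVM_GET_SUPPORTED_CPUID`` round trip looks
roughly like this from userspace (a minimal sketch with no error handling;
``kvm_fd`` and ``vcpu_fd`` are assumed to be open file descriptors for
/dev/kvm and a vCPU, and the entry count is an illustrative upper bound)::

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    #define NENT 100    /* illustrative upper bound on CPUID entries */

    static void copy_supported_cpuid(int kvm_fd, int vcpu_fd)
    {
            struct kvm_cpuid2 *cpuid;

            cpuid = calloc(1, sizeof(*cpuid) +
                              NENT * sizeof(struct kvm_cpuid_entry2));
            cpuid->nent = NENT;

            /* Ask KVM what it can virtualize on this host... */
            ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
            /* ...and hand the result straight back, subject to the
             * caveats documented above. */
            ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
            free(cpuid);
    }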
diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst
index b6833c7bb474..e0a2c74e1043 100644
--- a/Documentation/virt/kvm/index.rst
+++ b/Documentation/virt/kvm/index.rst
@@ -8,25 +8,13 @@ KVM
:maxdepth: 2
api
- amd-memory-encryption
- cpuid
- halt-polling
- hypercalls
- locking
- mmu
- msr
- nested-vmx
- ppc-pv
- s390-diag
- s390-pv
- s390-pv-boot
- timekeeping
- vcpu-requests
-
- review-checklist
+ devices/index
arm/index
+ s390/index
+ ppc-pv
+ x86/index
- devices/index
-
- running-nested-guests
+ locking
+ vcpu-requests
+ review-checklist
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index 5d27da356836..845a561629f1 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -210,32 +210,47 @@ time it will be set using the Dirty tracking mechanism described above.
3. Reference
------------
-:Name: kvm_lock
+``kvm_lock``
+^^^^^^^^^^^^
+
:Type: mutex
:Arch: any
:Protects: - vm_list
-:Name: kvm_count_lock
+``kvm_count_lock``
+^^^^^^^^^^^^^^^^^^
+
:Type: raw_spinlock_t
:Arch: any
:Protects: - hardware virtualization enable/disable
:Comment: 'raw' because hardware enabling/disabling must be atomic /wrt
migration.
-:Name: kvm_arch::tsc_write_lock
-:Type: raw_spinlock
+``kvm->mn_invalidate_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:Type: spinlock_t
+:Arch: any
+:Protects: mn_active_invalidate_count, mn_memslots_update_rcuwait
+
+``kvm_arch::tsc_write_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:Type: raw_spinlock_t
:Arch: x86
:Protects: - kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset}
- tsc offset in vmcb
:Comment: 'raw' because updating the tsc offsets must not be preempted.
-:Name: kvm->mmu_lock
-:Type: spinlock_t
+``kvm->mmu_lock``
+^^^^^^^^^^^^^^^^^
+:Type: spinlock_t or rwlock_t
:Arch: any
:Protects: -shadow page/shadow tlb entry
:Comment: it is a spinlock since it is used in mmu notifier.
-:Name: kvm->srcu
+``kvm->srcu``
+^^^^^^^^^^^^^
:Type: srcu lock
:Arch: any
:Protects: - kvm->memslots
@@ -246,10 +261,20 @@ time it will be set using the Dirty tracking mechanism described above.
The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
if it is needed by multiple functions.
-:Name: blocked_vcpu_on_cpu_lock
+``kvm->slots_arch_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^
+:Type: mutex
+:Arch: any (only needed on x86 though)
+:Protects: any arch-specific fields of memslots that have to be modified
+ in a ``kvm->srcu`` read-side critical section.
+:Comment: must be held before reading the pointer to the current memslots,
+ until after all changes to the memslots are complete
+
+``wakeup_vcpus_on_cpu_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:Type: spinlock_t
:Arch: x86
-:Protects: blocked_vcpu_on_cpu
+:Protects: wakeup_vcpus_on_cpu
:Comment: This is a per-CPU lock and it is used for VT-d posted-interrupts.
When VT-d posted-interrupts is supported and the VM has assigned
devices, we put the blocked vCPU on the list blocked_vcpu_on_cpu
diff --git a/Documentation/virt/kvm/s390/index.rst b/Documentation/virt/kvm/s390/index.rst
new file mode 100644
index 000000000000..605f488f0cc5
--- /dev/null
+++ b/Documentation/virt/kvm/s390/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+KVM for s390 systems
+====================
+
+.. toctree::
+ :maxdepth: 2
+
+ s390-diag
+ s390-pv
+ s390-pv-boot
diff --git a/Documentation/virt/kvm/s390-diag.rst b/Documentation/virt/kvm/s390/s390-diag.rst
index ca85f030eb0b..ca85f030eb0b 100644
--- a/Documentation/virt/kvm/s390-diag.rst
+++ b/Documentation/virt/kvm/s390/s390-diag.rst
diff --git a/Documentation/virt/kvm/s390-pv-boot.rst b/Documentation/virt/kvm/s390/s390-pv-boot.rst
index 73a6083cb5e7..73a6083cb5e7 100644
--- a/Documentation/virt/kvm/s390-pv-boot.rst
+++ b/Documentation/virt/kvm/s390/s390-pv-boot.rst
diff --git a/Documentation/virt/kvm/s390-pv.rst b/Documentation/virt/kvm/s390/s390-pv.rst
index 8e41a3b63fa5..8e41a3b63fa5 100644
--- a/Documentation/virt/kvm/s390-pv.rst
+++ b/Documentation/virt/kvm/s390/s390-pv.rst
diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst
index b61d48aec36c..db43ee571f5a 100644
--- a/Documentation/virt/kvm/vcpu-requests.rst
+++ b/Documentation/virt/kvm/vcpu-requests.rst
@@ -135,6 +135,16 @@ KVM_REQ_UNHALT
such as a pending signal, which does not indicate the VCPU's halt
emulation should stop, and therefore does not make the request.
+KVM_REQ_OUTSIDE_GUEST_MODE
+
+ This "request" ensures the target vCPU has exited guest mode prior to the
+ sender of the request continuing on. No action needs to be taken by the target,
+ and so no request is actually logged for the target. This request is similar
+ to a "kick", but unlike a kick it guarantees the vCPU has actually exited
+ guest mode. A kick only guarantees the vCPU will exit at some point in the
+ future, e.g. a previous kick may have started the process, but there's no
+ guarantee the to-be-kicked vCPU has fully exited guest mode.
+
KVM_REQUEST_MASK
----------------
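As a sketch of the KVM_REQ_OUTSIDE_GUEST_MODE semantics described above
(the exact in-kernel call sites vary; this only illustrates the contract)::

    /*
     * Ensure no vCPU is still executing guest code before the caller
     * proceeds, e.g. before tearing down a structure that the
     * guest-mode path may still be reading. Because this is a
     * no-action request, nothing is left pending for the targets.
     */
    kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
    /* From here on, every vCPU has exited guest mode at least once. */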
diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
index 1c6847fff304..1c6847fff304 100644
--- a/Documentation/virt/kvm/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/x86/cpuid.rst
index bda3e3e737d7..bda3e3e737d7 100644
--- a/Documentation/virt/kvm/cpuid.rst
+++ b/Documentation/virt/kvm/x86/cpuid.rst
diff --git a/Documentation/virt/kvm/x86/errata.rst b/Documentation/virt/kvm/x86/errata.rst
new file mode 100644
index 000000000000..806f049b6975
--- /dev/null
+++ b/Documentation/virt/kvm/x86/errata.rst
@@ -0,0 +1,39 @@
+
+=======================================
+Known limitations of CPU virtualization
+=======================================
+
+Whenever perfect emulation of a CPU feature is impossible or too hard, KVM
+has to choose between not implementing the feature at all or introducing
+behavioral differences between virtual machines and bare metal systems.
+
+This file documents some of the known limitations that KVM has in
+virtualizing CPU features.
+
+x86
+===
+
+``KVM_GET_SUPPORTED_CPUID`` issues
+----------------------------------
+
+x87 features
+~~~~~~~~~~~~
+
+Unlike most other CPUID feature bits, CPUID[EAX=7,ECX=0]:EBX[6]
+(FDP_EXCPTN_ONLY) and CPUID[EAX=7,ECX=0]:EBX[13] (ZERO_FCS_FDS) are
+clear if the features are present and set if the features are not present.
+
+Clearing these bits in CPUID has no effect on the operation of the guest;
+if these bits are set on hardware, the features will not be present on
+any virtual machine that runs on that hardware.
+
+**Workaround:** It is recommended to always set these bits in guest CPUID.
+Note however that any software (e.g. ``WIN87EM.DLL``) expecting these features
+to be present likely predates these CPUID feature bits, and therefore
+doesn't know to check for them anyway.
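+
+For illustration, a userspace VMM could apply the workaround after fetching
+``KVM_GET_SUPPORTED_CPUID`` and before calling ``KVM_SET_CPUID2`` (a sketch;
+the helper name is hypothetical)::
+
+  static void fixup_leaf7_x87_bits(struct kvm_cpuid2 *cpuid)
+  {
+      unsigned int i;
+
+      for (i = 0; i < cpuid->nent; i++) {
+          struct kvm_cpuid_entry2 *e = &cpuid->entries[i];
+
+          /* CPUID[EAX=7,ECX=0]: force the inverted bits on */
+          if (e->function == 7 && e->index == 0)
+              e->ebx |= (1u << 6) | (1u << 13);
+      }
+  }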
+
+Nested virtualization features
+------------------------------
+
+TBD
+
diff --git a/Documentation/virt/kvm/halt-polling.rst b/Documentation/virt/kvm/x86/halt-polling.rst
index 4922e4a15f18..4922e4a15f18 100644
--- a/Documentation/virt/kvm/halt-polling.rst
+++ b/Documentation/virt/kvm/x86/halt-polling.rst
diff --git a/Documentation/virt/kvm/hypercalls.rst b/Documentation/virt/kvm/x86/hypercalls.rst
index e56fa8b9cfca..e56fa8b9cfca 100644
--- a/Documentation/virt/kvm/hypercalls.rst
+++ b/Documentation/virt/kvm/x86/hypercalls.rst
diff --git a/Documentation/virt/kvm/x86/index.rst b/Documentation/virt/kvm/x86/index.rst
new file mode 100644
index 000000000000..7ff588826b9f
--- /dev/null
+++ b/Documentation/virt/kvm/x86/index.rst
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+KVM for x86 systems
+===================
+
+.. toctree::
+ :maxdepth: 2
+
+ amd-memory-encryption
+ cpuid
+ errata
+ halt-polling
+ hypercalls
+ mmu
+ msr
+ nested-vmx
+ running-nested-guests
+ timekeeping
diff --git a/Documentation/virt/kvm/mmu.rst b/Documentation/virt/kvm/x86/mmu.rst
index 5b1ebad24c77..5b1ebad24c77 100644
--- a/Documentation/virt/kvm/mmu.rst
+++ b/Documentation/virt/kvm/x86/mmu.rst
diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/x86/msr.rst
index 9315fc385fb0..9315fc385fb0 100644
--- a/Documentation/virt/kvm/msr.rst
+++ b/Documentation/virt/kvm/x86/msr.rst
diff --git a/Documentation/virt/kvm/nested-vmx.rst b/Documentation/virt/kvm/x86/nested-vmx.rst
index ac2095d41f02..ac2095d41f02 100644
--- a/Documentation/virt/kvm/nested-vmx.rst
+++ b/Documentation/virt/kvm/x86/nested-vmx.rst
diff --git a/Documentation/virt/kvm/running-nested-guests.rst b/Documentation/virt/kvm/x86/running-nested-guests.rst
index bd70c69468ae..bd70c69468ae 100644
--- a/Documentation/virt/kvm/running-nested-guests.rst
+++ b/Documentation/virt/kvm/x86/running-nested-guests.rst
diff --git a/Documentation/virt/kvm/timekeeping.rst b/Documentation/virt/kvm/x86/timekeeping.rst
index 21ae7efa29ba..21ae7efa29ba 100644
--- a/Documentation/virt/kvm/timekeeping.rst
+++ b/Documentation/virt/kvm/x86/timekeeping.rst
diff --git a/Documentation/vm/page_owner.rst b/Documentation/vm/page_owner.rst
index c4de6f8dabe9..65204d7f004f 100644
--- a/Documentation/vm/page_owner.rst
+++ b/Documentation/vm/page_owner.rst
@@ -125,7 +125,6 @@ Usage
additional function:
Cull:
- -c Cull by comparing stacktrace instead of total block.
--cull <rules>
Specify culling rules. Culling syntax is key[,key[,...]]. Choose a
multi-letter key from the **STANDARD FORMAT SPECIFIERS** section.
diff --git a/Documentation/vm/unevictable-lru.rst b/Documentation/vm/unevictable-lru.rst
index eae3af17f2d9..b280367d6a44 100644
--- a/Documentation/vm/unevictable-lru.rst
+++ b/Documentation/vm/unevictable-lru.rst
@@ -52,8 +52,13 @@ The infrastructure may also be able to handle other conditions that make pages
unevictable, either by definition or by circumstance, in the future.
-The Unevictable Page List
--------------------------
+The Unevictable LRU Page List
+-----------------------------
+
+The Unevictable LRU page list is a lie. It was never an LRU-ordered list, but a
+companion to the LRU-ordered anonymous and file, active and inactive page lists;
+and now it is not even a page list. But following familiar convention, here in
+this document and in the source, we often imagine it as a fifth LRU page list.
The Unevictable LRU infrastructure consists of an additional, per-node, LRU list
called the "unevictable" list and an associated page flag, PG_unevictable, to
@@ -63,8 +68,8 @@ The PG_unevictable flag is analogous to, and mutually exclusive with, the
PG_active flag in that it indicates on which LRU list a page resides when
PG_lru is set.
-The Unevictable LRU infrastructure maintains unevictable pages on an additional
-LRU list for a few reasons:
+The Unevictable LRU infrastructure maintains unevictable pages as if they were
+on an additional LRU list for a few reasons:
(1) We get to "treat unevictable pages just like we treat other pages in the
system - which means we get to use the same code to manipulate them, the
@@ -72,13 +77,11 @@ LRU list for a few reasons:
of the statistics, etc..." [Rik van Riel]
(2) We want to be able to migrate unevictable pages between nodes for memory
- defragmentation, workload management and memory hotplug. The linux kernel
+ defragmentation, workload management and memory hotplug. The Linux kernel
can only migrate pages that it can successfully isolate from the LRU
- lists. If we were to maintain pages elsewhere than on an LRU-like list,
- where they can be found by isolate_lru_page(), we would prevent their
- migration, unless we reworked migration code to find the unevictable pages
- itself.
-
+ lists (or "Movable" pages: outside of consideration here). If we were to
+ maintain pages elsewhere than on an LRU-like list, where they can be
+ detected by isolate_lru_page(), we would prevent their migration.
The unevictable list does not differentiate between file-backed and anonymous,
swap-backed pages. This differentiation is only important while the pages are,
@@ -92,8 +95,8 @@ Memory Control Group Interaction
--------------------------------
The unevictable LRU facility interacts with the memory control group [aka
-memory controller; see Documentation/admin-guide/cgroup-v1/memory.rst] by extending the
-lru_list enum.
+memory controller; see Documentation/admin-guide/cgroup-v1/memory.rst] by
+extending the lru_list enum.
The memory controller data structure automatically gets a per-node unevictable
list as a result of the "arrayification" of the per-node LRU lists (one per
@@ -143,7 +146,6 @@ These are currently used in three places in the kernel:
and this mark remains for the life of the inode.
(2) By SYSV SHM to mark SHM_LOCK'd address spaces until SHM_UNLOCK is called.
-
Note that SHM_LOCK is not required to page in the locked pages if they're
swapped out; the application must touch the pages manually if it wants to
ensure they're in memory.
@@ -156,19 +158,19 @@ These are currently used in three places in the kernel:
Detecting Unevictable Pages
---------------------------
-The function page_evictable() in vmscan.c determines whether a page is
+The function page_evictable() in mm/internal.h determines whether a page is
evictable or not using the query function outlined above [see section
:ref:`Marking address spaces unevictable <mark_addr_space_unevict>`]
to check the AS_UNEVICTABLE flag.
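+
+In outline, the test reduces to (lightly simplified from mm/internal.h)::
+
+  static inline bool page_evictable(struct page *page)
+  {
+      bool rc;
+
+      /* RCU protects the address_space against being freed */
+      rcu_read_lock();
+      rc = !mapping_unevictable(page_mapping(page)) &&
+           !PageMlocked(page);
+      rcu_read_unlock();
+      return rc;
+  }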
For address spaces that are so marked after being populated (as SHM regions
-might be), the lock action (eg: SHM_LOCK) can be lazy, and need not populate
+might be), the lock action (e.g. SHM_LOCK) can be lazy, and need not populate
the page tables for the region as does, for example, mlock(), nor need it make
any special effort to push any pages in the SHM_LOCK'd area to the unevictable
list. Instead, vmscan will do this if and when it encounters the pages during
a reclamation scan.
-On an unlock action (such as SHM_UNLOCK), the unlocker (eg: shmctl()) must scan
+On an unlock action (such as SHM_UNLOCK), the unlocker (e.g. shmctl()) must scan
the pages in the region and "rescue" them from the unevictable list if no other
condition is keeping them unevictable. If an unevictable region is destroyed,
the pages are also "rescued" from the unevictable list in the process of
@@ -176,7 +178,7 @@ freeing them.
page_evictable() also checks for mlocked pages by testing an additional page
flag, PG_mlocked (as wrapped by PageMlocked()), which is set when a page is
-faulted into a VM_LOCKED vma, or found in a vma being VM_LOCKED.
+faulted into a VM_LOCKED VMA, or found in a VMA being VM_LOCKED.
Vmscan's Handling of Unevictable Pages
@@ -186,28 +188,23 @@ If unevictable pages are culled in the fault path, or moved to the unevictable
list at mlock() or mmap() time, vmscan will not encounter the pages until they
have become evictable again (via munlock() for example) and have been "rescued"
from the unevictable list. However, there may be situations where we decide,
-for the sake of expediency, to leave a unevictable page on one of the regular
+for the sake of expediency, to leave an unevictable page on one of the regular
active/inactive LRU lists for vmscan to deal with. vmscan checks for such
pages in all of the shrink_{active|inactive|page}_list() functions and will
"cull" such pages that it encounters: that is, it diverts those pages to the
-unevictable list for the node being scanned.
+unevictable list for the memory cgroup and node being scanned.
There may be situations where a page is mapped into a VM_LOCKED VMA, but the
page is not marked as PG_mlocked. Such pages will make it all the way to
-shrink_page_list() where they will be detected when vmscan walks the reverse
-map in try_to_unmap(). If try_to_unmap() returns SWAP_MLOCK,
-shrink_page_list() will cull the page at that point.
+shrink_active_list() or shrink_page_list() where they will be detected when
+vmscan walks the reverse map in page_referenced() or try_to_unmap(). The page
+is culled to the unevictable list when it is released by the shrinker.
To "cull" an unevictable page, vmscan simply puts the page back on the LRU list
using putback_lru_page() - the inverse operation to isolate_lru_page() - after
dropping the page lock. Because the condition which makes the page unevictable
-may change once the page is unlocked, putback_lru_page() will recheck the
-unevictable state of a page that it places on the unevictable list. If the
-page has become unevictable, putback_lru_page() removes it from the list and
-retries, including the page_unevictable() test. Because such a race is a rare
-event and movement of pages onto the unevictable list should be rare, these
-extra evictabilty checks should not occur in the majority of calls to
-putback_lru_page().
+may change once the page is unlocked, __pagevec_lru_add_fn() will recheck the
+unevictable state of a page before placing it on the unevictable list.
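+
+The recheck is roughly as follows (paraphrased from __pagevec_lru_add_fn() in
+mm/swap.c, with statistics and locking details omitted)::
+
+  if (folio_evictable(folio)) {
+      /* the condition cleared while the page was off the LRU */
+      if (was_unevictable)
+          __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
+  } else {
+      folio_clear_active(folio);
+      folio_set_unevictable(folio);
+      /* see the discussion of mlock_count below */
+      folio->mlock_count = 0;
+      if (!was_unevictable)
+          __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
+  }
+  lruvec_add_folio(lruvec, folio);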
MLOCKED Pages
@@ -227,16 +224,25 @@ Nick posted his patch as an alternative to a patch posted by Christoph Lameter
to achieve the same objective: hiding mlocked pages from vmscan.
In Nick's patch, he used one of the struct page LRU list link fields as a count
-of VM_LOCKED VMAs that map the page. This use of the link field for a count
-prevented the management of the pages on an LRU list, and thus mlocked pages
-were not migratable as isolate_lru_page() could not find them, and the LRU list
-link field was not available to the migration subsystem.
+of VM_LOCKED VMAs that map the page (Rik van Riel had the same idea three years
+earlier). But this use of the link field for a count prevented the management
+of the pages on an LRU list, and thus mlocked pages were not migratable as
+isolate_lru_page() could not detect them, and the LRU list link field was not
+available to the migration subsystem.
-Nick resolved this by putting mlocked pages back on the lru list before
+Nick resolved this by putting mlocked pages back on the LRU list before
attempting to isolate them, thus abandoning the count of VM_LOCKED VMAs. When
Nick's patch was integrated with the Unevictable LRU work, the count was
-replaced by walking the reverse map to determine whether any VM_LOCKED VMAs
-mapped the page. More on this below.
+replaced by walking the reverse map when munlocking, to determine whether any
+other VM_LOCKED VMAs still mapped the page.
+
+However, walking the reverse map for each page when munlocking was ugly and
+inefficient, and could lead to catastrophic contention on a file's rmap lock,
+when many processes which had it mlocked were trying to exit. In 5.18, the
+idea of keeping mlock_count in the Unevictable LRU list link field was revived
+and put to work, without preventing the migration of mlocked pages. This is why
+the "Unevictable LRU list" cannot be a linked list of pages now; but there was
+no use for that linked list anyway - though its size is maintained for meminfo.
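+
+The reuse of the LRU list link field looks like this (abridged from struct
+page in include/linux/mm_types.h)::
+
+  union {
+      struct list_head lru;
+      /* Or, for the Unevictable "LRU list" slot */
+      struct {
+          /* Always even, to negate PageTail */
+          void *__filler;
+          /* Count page's or folio's mlocks */
+          unsigned int mlock_count;
+      };
+  };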
Basic Management
@@ -250,22 +256,18 @@ PageMlocked() functions.
A PG_mlocked page will be placed on the unevictable list when it is added to
the LRU. Such pages can be "noticed" by memory management in several places:
- (1) in the mlock()/mlockall() system call handlers;
+ (1) in the mlock()/mlock2()/mlockall() system call handlers;
(2) in the mmap() system call handler when mmapping a region with the
MAP_LOCKED flag;
(3) mmapping a region in a task that has called mlockall() with the MCL_FUTURE
- flag
+ flag;
- (4) in the fault path, if mlocked pages are "culled" in the fault path,
- and when a VM_LOCKED stack segment is expanded; or
+ (4) in the fault path and when a VM_LOCKED stack segment is expanded; or
(5) as mentioned above, in vmscan:shrink_page_list() when attempting to
- reclaim a page in a VM_LOCKED VMA via try_to_unmap()
-
-all of which result in the VM_LOCKED flag being set for the VMA if it doesn't
-already have it set.
+ reclaim a page in a VM_LOCKED VMA by page_referenced() or try_to_unmap().
mlocked pages become unlocked and rescued from the unevictable list when:
@@ -280,51 +282,53 @@ mlocked pages become unlocked and rescued from the unevictable list when:
(4) before a page is COW'd in a VM_LOCKED VMA.
-mlock()/mlockall() System Call Handling
----------------------------------------
+mlock()/mlock2()/mlockall() System Call Handling
+------------------------------------------------
-Both [do\_]mlock() and [do\_]mlockall() system call handlers call mlock_fixup()
+mlock(), mlock2() and mlockall() system call handlers proceed to mlock_fixup()
for each VMA in the range specified by the call. In the case of mlockall(),
this is the entire active address space of the task. Note that mlock_fixup()
is used for both mlocking and munlocking a range of memory. A call to mlock()
-an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED is
-treated as a no-op, and mlock_fixup() simply returns.
+an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED, is
+treated as a no-op and mlock_fixup() simply returns.
-If the VMA passes some filtering as described in "Filtering Special Vmas"
+If the VMA passes some filtering as described in "Filtering Special VMAs"
below, mlock_fixup() will attempt to merge the VMA with its neighbors or split
-off a subset of the VMA if the range does not cover the entire VMA. Once the
-VMA has been merged or split or neither, mlock_fixup() will call
-populate_vma_page_range() to fault in the pages via get_user_pages() and to
-mark the pages as mlocked via mlock_vma_page().
+off a subset of the VMA if the range does not cover the entire VMA. Any pages
+already present in the VMA are then marked as mlocked by mlock_page() via
+mlock_pte_range() via walk_page_range() via mlock_vma_pages_range().
+
+Before returning from the system call, do_mlock() or mlockall() will call
+__mm_populate() to fault in the remaining pages via get_user_pages() and to
+mark those pages as mlocked as they are faulted.
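+
+The overall call chain is then roughly::
+
+  mlock() / mlock2() / mlockall()
+      mlock_fixup()                  /* merge/split VMA, set VM_LOCKED */
+          mlock_vma_pages_range()
+              walk_page_range()
+                  mlock_pte_range()
+                      mlock_page()   /* pages already present */
+      __mm_populate()
+          populate_vma_page_range()
+              get_user_pages()       /* remaining pages mlocked as faulted */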
Note that the VMA being mlocked might be mapped with PROT_NONE. In this case,
get_user_pages() will be unable to fault in the pages. That's okay. If pages
-do end up getting faulted into this VM_LOCKED VMA, we'll handle them in the
-fault path or in vmscan.
-
-Also note that a page returned by get_user_pages() could be truncated or
-migrated out from under us, while we're trying to mlock it. To detect this,
-populate_vma_page_range() checks page_mapping() after acquiring the page lock.
-If the page is still associated with its mapping, we'll go ahead and call
-mlock_vma_page(). If the mapping is gone, we just unlock the page and move on.
-In the worst case, this will result in a page mapped in a VM_LOCKED VMA
-remaining on a normal LRU list without being PageMlocked(). Again, vmscan will
-detect and cull such pages.
-
-mlock_vma_page() will call TestSetPageMlocked() for each page returned by
-get_user_pages(). We use TestSetPageMlocked() because the page might already
-be mlocked by another task/VMA and we don't want to do extra work. We
-especially do not want to count an mlocked page more than once in the
-statistics. If the page was already mlocked, mlock_vma_page() need do nothing
-more.
-
-If the page was NOT already mlocked, mlock_vma_page() attempts to isolate the
-page from the LRU, as it is likely on the appropriate active or inactive list
-at that time. If the isolate_lru_page() succeeds, mlock_vma_page() will put
-back the page - by calling putback_lru_page() - which will notice that the page
-is now mlocked and divert the page to the node's unevictable list. If
-mlock_vma_page() is unable to isolate the page from the LRU, vmscan will handle
-it later if and when it attempts to reclaim the page.
+do end up getting faulted into this VM_LOCKED VMA, they will be handled in the
+fault path - which is also how mlock2()'s MLOCK_ONFAULT areas are handled.
+
+For each PTE (or PMD) being faulted into a VMA, the page add rmap function
+calls mlock_vma_page(), which calls mlock_page() when the VMA is VM_LOCKED
+(unless it is a PTE mapping of a part of a transparent huge page). Or when
+it is a newly allocated anonymous page, lru_cache_add_inactive_or_unevictable()
+calls mlock_new_page() instead: similar to mlock_page(), but can make better
+judgments, since this page is held exclusively and known not to be on LRU yet.
+
+mlock_page() sets PageMlocked immediately, then places the page on the CPU's
+mlock pagevec, to batch up the rest of the work to be done under lru_lock by
+__mlock_page(). __mlock_page() sets PageUnevictable, initializes mlock_count
+and moves the page to unevictable state ("the unevictable LRU", but with
+mlock_count in place of LRU threading). Or if the page was already PageLRU
+and PageUnevictable and PageMlocked, it simply increments the mlock_count.
+
+But in practice that may not work ideally: the page may not yet be on an LRU, or
+it may have been temporarily isolated from LRU. In such cases the mlock_count
+field cannot be touched, but will be set to 0 later when __pagevec_lru_add_fn()
+returns the page to "LRU". Races prohibit mlock_count from being set to 1 then:
+rather than risk stranding a page indefinitely as unevictable, always err with
+mlock_count on the low side, so that when munlocked the page will be rescued to
+an evictable LRU, then perhaps be mlocked again later if vmscan finds it in a
+VM_LOCKED VMA.
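+
+In outline, the work batched up for __mlock_page() looks like this (simplified
+from mm/mlock.c; lruvec locking and statistics omitted)::
+
+  if (!TestClearPageLRU(page))
+      return;                 /* off LRU: mlock_count cannot be used */
+
+  if (PageUnevictable(page)) {
+      if (PageMlocked(page))
+          page->mlock_count++;
+  } else {
+      SetPageUnevictable(page);
+      page->mlock_count = !!PageMlocked(page);
+      /* move from active/inactive to the unevictable "list" */
+  }
+  SetPageLRU(page);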
Filtering Special VMAs
@@ -339,68 +343,48 @@ mlock_fixup() filters several classes of "special" VMAs:
so there is no sense in attempting to visit them.
2) VMAs mapping hugetlbfs pages are already effectively pinned into memory. We
- neither need nor want to mlock() these pages. However, to preserve the
- prior behavior of mlock() - before the unevictable/mlock changes -
- mlock_fixup() will call make_pages_present() in the hugetlbfs VMA range to
- allocate the huge pages and populate the ptes.
+ neither need nor want to mlock() these pages. But __mm_populate() includes
+ hugetlbfs ranges, allocating the huge pages and populating the PTEs.
3) VMAs with VM_DONTEXPAND are generally userspace mappings of kernel pages,
- such as the VDSO page, relay channel pages, etc. These pages
- are inherently unevictable and are not managed on the LRU lists.
- mlock_fixup() treats these VMAs the same as hugetlbfs VMAs. It calls
- make_pages_present() to populate the ptes.
+ such as the VDSO page, relay channel pages, etc. These pages are inherently
+ unevictable and are not managed on the LRU lists. __mm_populate() includes
+ these ranges, populating the PTEs if not already populated.
+
+4) VMAs with VM_MIXEDMAP set are not marked VM_LOCKED, but __mm_populate()
+ includes these ranges, populating the PTEs if not already populated.
Note that for all of these special VMAs, mlock_fixup() does not set the
VM_LOCKED flag. Therefore, we won't have to deal with them later during
munlock(), munmap() or task exit. Neither does mlock_fixup() account these
VMAs against the task's "locked_vm".
-.. _munlock_munlockall_handling:
munlock()/munlockall() System Call Handling
-------------------------------------------
-The munlock() and munlockall() system calls are handled by the same functions -
-do_mlock[all]() - as the mlock() and mlockall() system calls with the unlock vs
-lock operation indicated by an argument. So, these system calls are also
-handled by mlock_fixup(). Again, if called for an already munlocked VMA,
-mlock_fixup() simply returns. Because of the VMA filtering discussed above,
-VM_LOCKED will not be set in any "special" VMAs. So, these VMAs will be
-ignored for munlock.
+The munlock() and munlockall() system calls are handled by the same
+mlock_fixup() function as mlock(), mlock2() and mlockall() system calls are.
+If called to munlock an already munlocked VMA, mlock_fixup() simply returns.
+Because of the VMA filtering discussed above, VM_LOCKED will not be set in
+any "special" VMAs. So, those VMAs will be ignored for munlock.
If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the
-specified range. The range is then munlocked via the function
-populate_vma_page_range() - the same function used to mlock a VMA range -
-passing a flag to indicate that munlock() is being performed.
-
-Because the VMA access protections could have been changed to PROT_NONE after
-faulting in and mlocking pages, get_user_pages() was unreliable for visiting
-these pages for munlocking. Because we don't want to leave pages mlocked,
-get_user_pages() was enhanced to accept a flag to ignore the permissions when
-fetching the pages - all of which should be resident as a result of previous
-mlocking.
-
-For munlock(), populate_vma_page_range() unlocks individual pages by calling
-munlock_vma_page(). munlock_vma_page() unconditionally clears the PG_mlocked
-flag using TestClearPageMlocked(). As with mlock_vma_page(),
-munlock_vma_page() use the Test*PageMlocked() function to handle the case where
-the page might have already been unlocked by another task. If the page was
-mlocked, munlock_vma_page() updates that zone statistics for the number of
-mlocked pages. Note, however, that at this point we haven't checked whether
-the page is mapped by other VM_LOCKED VMAs.
-
-We can't call page_mlock(), the function that walks the reverse map to
-check for other VM_LOCKED VMAs, without first isolating the page from the LRU.
-page_mlock() is a variant of try_to_unmap() and thus requires that the page
-not be on an LRU list [more on these below]. However, the call to
-isolate_lru_page() could fail, in which case we can't call page_mlock(). So,
-we go ahead and clear PG_mlocked up front, as this might be the only chance we
-have. If we can successfully isolate the page, we go ahead and call
-page_mlock(), which will restore the PG_mlocked flag and update the zone
-page statistics if it finds another VMA holding the page mlocked. If we fail
-to isolate the page, we'll have left a potentially mlocked page on the LRU.
-This is fine, because we'll catch it later if and if vmscan tries to reclaim
-the page. This should be relatively rare.
+specified range. All pages in the VMA are then munlocked by munlock_page() via
+mlock_pte_range() via walk_page_range() via mlock_vma_pages_range() - the same
+function used when mlocking a VMA range, with new flags for the VMA indicating
+that munlock() is being performed.
+
+munlock_page() uses the mlock pagevec to batch up work to be done under
+lru_lock by __munlock_page(). __munlock_page() decrements the page's
+mlock_count, and when that reaches 0 it clears PageMlocked and clears
+PageUnevictable, moving the page from unevictable state to inactive LRU.
+
+But in practice that may not work ideally: the page may not yet have reached
+"the unevictable LRU", or it may have been temporarily isolated from it. In
+those cases its mlock_count field is unusable and must be assumed to be 0: so
+that the page will be rescued to an evictable LRU, then perhaps be mlocked
+again later if vmscan finds it in a VM_LOCKED VMA.
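+
+In outline, the work batched up for __munlock_page() is (simplified from
+mm/mlock.c; statistics and locking omitted)::
+
+  if (PageUnevictable(page) && page->mlock_count > 0) {
+      if (--page->mlock_count > 0)
+          return;             /* still mlocked by another VMA */
+  }
+  /* else mlock_count is unusable and assumed 0: err on the low side */
+
+  ClearPageMlocked(page);
+  ClearPageUnevictable(page);
+  /* move the page to the inactive LRU; vmscan may re-mlock it later */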
Migrating MLOCKED Pages
@@ -410,33 +394,38 @@ A page that is being migrated has been isolated from the LRU lists and is held
locked across unmapping of the page, updating the page's address space entry
and copying the contents and state, until the page table entry has been
replaced with an entry that refers to the new page. Linux supports migration
-of mlocked pages and other unevictable pages. This involves simply moving the
-PG_mlocked and PG_unevictable states from the old page to the new page.
+of mlocked pages and other unevictable pages. PG_mlocked is cleared from the
+old page when it is unmapped from the last VM_LOCKED VMA, and set when the new
+page is mapped in place of the migration entry in a VM_LOCKED VMA. If the page
+was unevictable because mlocked, PG_unevictable follows PG_mlocked; but if the
+page was unevictable for other reasons, PG_unevictable is copied explicitly.
Note that page migration can race with mlocking or munlocking of the same page.
-This has been discussed from the mlock/munlock perspective in the respective
-sections above. Both processes (migration and m[un]locking) hold the page
-locked. This provides the first level of synchronization. Page migration
-zeros out the page_mapping of the old page before unlocking it, so m[un]lock
-can skip these pages by testing the page mapping under page lock.
+There is mostly no problem since page migration requires unmapping all PTEs of
+the old page (including munlock where VM_LOCKED), then mapping in the new page
+(including mlock where VM_LOCKED). The page table locks provide sufficient
+synchronization.
-To complete page migration, we place the new and old pages back onto the LRU
-after dropping the page lock. The "unneeded" page - old page on success, new
-page on failure - will be freed when the reference count held by the migration
-process is released. To ensure that we don't strand pages on the unevictable
-list because of a race between munlock and migration, page migration uses the
-putback_lru_page() function to add migrated pages back to the LRU.
+However, since mlock_vma_pages_range() starts by setting VM_LOCKED on a VMA,
+before mlocking any pages already present, if one of those pages were migrated
+before mlock_pte_range() reached it, it would get counted twice in mlock_count.
+To prevent that, mlock_vma_pages_range() temporarily marks the VMA as VM_IO,
+so that mlock_vma_page() will skip it.
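+
+In outline (simplified from mlock_vma_pages_range() in mm/mlock.c)::
+
+  if (newflags & VM_LOCKED)
+      newflags |= VM_IO;      /* tell mlock_vma_page() to skip */
+  WRITE_ONCE(vma->vm_flags, newflags);
+
+  walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
+
+  if (newflags & VM_IO) {
+      newflags &= ~VM_IO;     /* restore the real flags afterwards */
+      WRITE_ONCE(vma->vm_flags, newflags);
+  }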
+
+To complete page migration, we place the old and new pages back onto the LRU
+afterwards. The "unneeded" page - old page on success, new page on failure -
+is freed when the reference count held by the migration process is released.
Compacting MLOCKED Pages
------------------------
-The unevictable LRU can be scanned for compactable regions and the default
-behavior is to do so. /proc/sys/vm/compact_unevictable_allowed controls
-this behavior (see Documentation/admin-guide/sysctl/vm.rst). Once scanning of the
-unevictable LRU is enabled, the work of compaction is mostly handled by
-the page migration code and the same work flow as described in MIGRATING
-MLOCKED PAGES will apply.
+The memory map can be scanned for compactable regions and the default behavior
+is to let unevictable pages be moved. /proc/sys/vm/compact_unevictable_allowed
+controls this behavior (see Documentation/admin-guide/sysctl/vm.rst). The work
+of compaction is mostly handled by the page migration code and the same work
+flow as described in Migrating MLOCKED Pages will apply.
+
MLOCKING Transparent Huge Pages
-------------------------------
@@ -445,51 +434,44 @@ A transparent huge page is represented by a single entry on an LRU list.
Therefore, we can only make unevictable an entire compound page, not
individual subpages.
-If a user tries to mlock() part of a huge page, we want the rest of the
-page to be reclaimable.
+If a user tries to mlock() part of a huge page, and no user mlock()s the
+whole of the huge page, we want the rest of the page to be reclaimable.
We cannot just split the page on partial mlock() as split_huge_page() can
-fail and new intermittent failure mode for the syscall is undesirable.
+fail and a new intermittent failure mode for the syscall is undesirable.
-We handle this by keeping PTE-mapped huge pages on normal LRU lists: the
-PMD on border of VM_LOCKED VMA will be split into PTE table.
+We handle this by keeping PTE-mlocked huge pages on evictable LRU lists:
+the PMD on the border of a VM_LOCKED VMA will be split into a PTE table.
-This way the huge page is accessible for vmscan. Under memory pressure the
+This way the huge page is accessible for vmscan. Under memory pressure the
page will be split, subpages which belong to VM_LOCKED VMAs will be moved
-to unevictable LRU and the rest can be reclaimed.
+to the unevictable LRU and the rest can be reclaimed.
+
+/proc/meminfo's Unevictable and Mlocked amounts do not include those parts
+of a transparent huge page which are mapped only by PTEs in VM_LOCKED VMAs.
-See also comment in follow_trans_huge_pmd().
mmap(MAP_LOCKED) System Call Handling
-------------------------------------
-In addition the mlock()/mlockall() system calls, an application can request
-that a region of memory be mlocked supplying the MAP_LOCKED flag to the mmap()
-call. There is one important and subtle difference here, though. mmap() + mlock()
-will fail if the range cannot be faulted in (e.g. because mm_populate fails)
-and returns with ENOMEM while mmap(MAP_LOCKED) will not fail. The mmaped
-area will still have properties of the locked area - aka. pages will not get
-swapped out - but major page faults to fault memory in might still happen.
+In addition to the mlock(), mlock2() and mlockall() system calls, an application
+can request that a region of memory be mlocked by supplying the MAP_LOCKED flag
+to the mmap() call. There is one important and subtle difference here, though.
+mmap() + mlock() will fail if the range cannot be faulted in (e.g. because
+mm_populate() fails) and returns with ENOMEM while mmap(MAP_LOCKED) will not fail.
+The mmaped area will still have properties of the locked area - pages will not
+get swapped out - but major page faults to fault memory in might still happen.
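+
+For example (an illustrative userspace fragment)::
+
+  /* mmap(MAP_LOCKED): the call succeeds even if population fails */
+  p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+           MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
+
+  /* mmap() + mlock(): mlock() fails with ENOMEM if the range
+   * cannot be faulted in */
+  p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (p != MAP_FAILED && mlock(p, len) != 0)
+      perror("mlock");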
-Furthermore, any mmap() call or brk() call that expands the heap by a
-task that has previously called mlockall() with the MCL_FUTURE flag will result
+Furthermore, any mmap() call or brk() call that expands the heap by a task
+that has previously called mlockall() with the MCL_FUTURE flag will result
in the newly mapped memory being mlocked. Before the unevictable/mlock
-changes, the kernel simply called make_pages_present() to allocate pages and
-populate the page table.
+changes, the kernel simply called make_pages_present() to allocate pages
+and populate the page table.
-To mlock a range of memory under the unevictable/mlock infrastructure, the
-mmap() handler and task address space expansion functions call
+To mlock a range of memory under the unevictable/mlock infrastructure,
+the mmap() handler and task address space expansion functions call
populate_vma_page_range() specifying the vma and the address range to mlock.
-The callers of populate_vma_page_range() will have already added the memory range
-to be mlocked to the task's "locked_vm". To account for filtered VMAs,
-populate_vma_page_range() returns the number of pages NOT mlocked. All of the
-callers then subtract a non-negative return value from the task's locked_vm. A
-negative return value represent an error - for example, from get_user_pages()
-attempting to fault in a VMA with PROT_NONE access. In this case, we leave the
-memory range accounted as locked_vm, as the protections could be changed later
-and pages allocated into that region.
-
munmap()/exit()/exec() System Call Handling
-------------------------------------------
@@ -500,81 +482,53 @@ munlock the pages if we're removing the last VM_LOCKED VMA that maps the pages.
Before the unevictable/mlock changes, mlocking did not mark the pages in any
way, so unmapping them required no processing.
-To munlock a range of memory under the unevictable/mlock infrastructure, the
-munmap() handler and task address space call tear down function
-munlock_vma_pages_all(). The name reflects the observation that one always
-specifies the entire VMA range when munlock()ing during unmap of a region.
-Because of the VMA filtering when mlocking() regions, only "normal" VMAs that
-actually contain mlocked pages will be passed to munlock_vma_pages_all().
-
-munlock_vma_pages_all() clears the VM_LOCKED VMA flag and, like mlock_fixup()
-for the munlock case, calls __munlock_vma_pages_range() to walk the page table
-for the VMA's memory range and munlock_vma_page() each resident page mapped by
-the VMA. This effectively munlocks the page, only if this is the last
-VM_LOCKED VMA that maps the page.
-
-
-try_to_unmap()
---------------
-
-Pages can, of course, be mapped into multiple VMAs. Some of these VMAs may
-have VM_LOCKED flag set. It is possible for a page mapped into one or more
-VM_LOCKED VMAs not to have the PG_mlocked flag set and therefore reside on one
-of the active or inactive LRU lists. This could happen if, for example, a task
-in the process of munlocking the page could not isolate the page from the LRU.
-As a result, vmscan/shrink_page_list() might encounter such a page as described
-in section "vmscan's handling of unevictable pages". To handle this situation,
-try_to_unmap() checks for VM_LOCKED VMAs while it is walking a page's reverse
-map.
-
-try_to_unmap() is always called, by either vmscan for reclaim or for page
-migration, with the argument page locked and isolated from the LRU. Separate
-functions handle anonymous and mapped file and KSM pages, as these types of
-pages have different reverse map lookup mechanisms, with different locking.
-In each case, whether rmap_walk_anon() or rmap_walk_file() or rmap_walk_ksm(),
-it will call try_to_unmap_one() for every VMA which might contain the page.
-
-When trying to reclaim, if try_to_unmap_one() finds the page in a VM_LOCKED
-VMA, it will then mlock the page via mlock_vma_page() instead of unmapping it,
-and return SWAP_MLOCK to indicate that the page is unevictable: and the scan
-stops there.
-
-mlock_vma_page() is called while holding the page table's lock (in addition
-to the page lock, and the rmap lock): to serialize against concurrent mlock or
-munlock or munmap system calls, mm teardown (munlock_vma_pages_all), reclaim,
-holepunching, and truncation of file pages and their anonymous COWed pages.
-
-
-page_mlock() Reverse Map Scan
----------------------------------
-
-When munlock_vma_page() [see section :ref:`munlock()/munlockall() System Call
-Handling <munlock_munlockall_handling>` above] tries to munlock a
-page, it needs to determine whether or not the page is mapped by any
-VM_LOCKED VMA without actually attempting to unmap all PTEs from the
-page. For this purpose, the unevictable/mlock infrastructure
-introduced a variant of try_to_unmap() called page_mlock().
-
-page_mlock() walks the respective reverse maps looking for VM_LOCKED VMAs. When
-such a VMA is found the page is mlocked via mlock_vma_page(). This undoes the
-pre-clearing of the page's PG_mlocked done by munlock_vma_page.
-
-Note that page_mlock()'s reverse map walk must visit every VMA in a page's
-reverse map to determine that a page is NOT mapped into any VM_LOCKED VMA.
-However, the scan can terminate when it encounters a VM_LOCKED VMA.
-Although page_mlock() might be called a great many times when munlocking a
-large region or tearing down a large address space that has been mlocked via
-mlockall(), overall this is a fairly rare event.
+For each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+(unless it was a PTE mapping of a part of a transparent huge page).
+
+munlock_page() uses the mlock pagevec to batch up work to be done under
+lru_lock by __munlock_page(). __munlock_page() decrements the page's
+mlock_count, and when that reaches 0 it clears PageMlocked and clears
+PageUnevictable, moving the page from unevictable state to inactive LRU.
+
+But in practice that may not work ideally: the page may not yet have reached
+"the unevictable LRU", or it may have been temporarily isolated from it. In
+those cases its mlock_count field is unusable and must be assumed to be 0: so
+that the page will be rescued to an evictable LRU, then perhaps be mlocked
+again later if vmscan finds it in a VM_LOCKED VMA.
+
+
+Truncating MLOCKED Pages
+------------------------
+
+File truncation or hole punching forcibly unmaps the deleted pages from
+userspace; truncation even unmaps and deletes any private anonymous pages
+which had been Copied-On-Write from the file pages now being truncated.
+
+Mlocked pages can be munlocked and deleted in this way: like with munmap(),
+for each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+(unless it was a PTE mapping of a part of a transparent huge page).
+
+However, if there is a racing munlock(), since mlock_vma_pages_range() starts
+munlocking by clearing VM_LOCKED from a VMA, before munlocking all the pages
+present, if one of those pages were unmapped by truncation or hole punch before
+mlock_pte_range() reached it, it would not be recognized as mlocked by this VMA,
+and would not be counted out of mlock_count. In this rare case, a page may
+still appear as PageMlocked after it has been fully unmapped: and it is left to
+release_pages() (or __page_cache_release()) to clear it and update statistics
+before freeing (this event is counted in /proc/vmstat unevictable_pgs_cleared,
+which is usually 0).
Page Reclaim in shrink_*_list()
-------------------------------
-shrink_active_list() culls any obviously unevictable pages - i.e.
-!page_evictable(page) - diverting these to the unevictable list.
+vmscan's shrink_active_list() culls any obviously unevictable pages -
+i.e. !page_evictable(page) pages - diverting those to the unevictable list.
However, shrink_active_list() only sees unevictable pages that made it onto the
-active/inactive lru lists. Note that these pages do not have PageUnevictable
-set - otherwise they would be on the unevictable list and shrink_active_list
+active/inactive LRU lists. Note that these pages do not have PageUnevictable
+set - otherwise they would be on the unevictable list and shrink_active_list()
would never see them.
Some examples of these unevictable pages on the LRU lists are:
@@ -586,20 +540,15 @@ Some examples of these unevictable pages on the LRU lists are:
when an application accesses the page the first time after SHM_LOCK'ing
the segment.
- (3) mlocked pages that could not be isolated from the LRU and moved to the
- unevictable list in mlock_vma_page().
-
-shrink_inactive_list() also diverts any unevictable pages that it finds on the
-inactive lists to the appropriate node's unevictable list.
+ (3) pages still mapped into VM_LOCKED VMAs, which should be marked mlocked,
+ but events left mlock_count too low, so they were munlocked too early.
-shrink_inactive_list() should only see SHM_LOCK'd pages that became SHM_LOCK'd
-after shrink_active_list() had moved them to the inactive list, or pages mapped
-into VM_LOCKED VMAs that munlock_vma_page() couldn't isolate from the LRU to
-recheck via page_mlock(). shrink_inactive_list() won't notice the latter,
-but will pass on to shrink_page_list().
+vmscan's shrink_inactive_list() and shrink_page_list() also divert obviously
+unevictable pages found on the inactive lists to the appropriate memory cgroup
+and node unevictable list.
-shrink_page_list() again culls obviously unevictable pages that it could
-encounter for similar reason to shrink_inactive_list(). Pages mapped into
-VM_LOCKED VMAs but without PG_mlocked set will make it all the way to
-try_to_unmap(). shrink_page_list() will divert them to the unevictable list
-when try_to_unmap() returns SWAP_MLOCK, as discussed above.
+rmap's page_referenced_one(), called via vmscan's shrink_active_list() or
+shrink_page_list(), and rmap's try_to_unmap_one() called via shrink_page_list(),
+check for (3) pages still mapped into VM_LOCKED VMAs, and call mlock_vma_page()
+to correct them. Such pages are culled to the unevictable list when released
+by the shrinker.
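+
+The check is roughly (simplified from try_to_unmap_one() in mm/rmap.c, inside
+the page_vma_mapped_walk() loop)::
+
+  if (!(flags & TTU_IGNORE_MLOCK) &&
+      (vma->vm_flags & VM_LOCKED)) {
+      /* Restore the mlock which got missed */
+      mlock_vma_page(subpage, vma, false);
+      page_vma_mapped_walk_done(&pvmw);
+      ret = false;
+      break;
+  }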
diff --git a/MAINTAINERS b/MAINTAINERS
index cbbd3ce7e0c2..fd768d43e048 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4640,6 +4640,7 @@ F: drivers/input/touchscreen/chipone_icn8505.c
CHROME HARDWARE PLATFORM SUPPORT
M: Benson Leung <bleung@chromium.org>
+L: chrome-platform@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrome-platform/linux.git
F: drivers/platform/chrome/
@@ -4648,6 +4649,7 @@ CHROMEOS EC CODEC DRIVER
M: Cheng-Yi Chiang <cychiang@chromium.org>
M: Tzung-Bi Shih <tzungbi@google.com>
R: Guenter Roeck <groeck@chromium.org>
+L: chrome-platform@lists.linux.dev
S: Maintained
F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
F: sound/soc/codecs/cros_ec_codec.*
@@ -4655,6 +4657,7 @@ F: sound/soc/codecs/cros_ec_codec.*
CHROMEOS EC SUBDRIVERS
M: Benson Leung <bleung@chromium.org>
R: Guenter Roeck <groeck@chromium.org>
+L: chrome-platform@lists.linux.dev
S: Maintained
F: drivers/power/supply/cros_usbpd-charger.c
N: cros_ec
@@ -4662,11 +4665,13 @@ N: cros-ec
CHROMEOS EC USB TYPE-C DRIVER
M: Prashant Malani <pmalani@chromium.org>
+L: chrome-platform@lists.linux.dev
S: Maintained
F: drivers/platform/chrome/cros_ec_typec.c
CHROMEOS EC USB PD NOTIFY DRIVER
M: Prashant Malani <pmalani@chromium.org>
+L: chrome-platform@lists.linux.dev
S: Maintained
F: drivers/platform/chrome/cros_usbpd_notify.c
F: include/linux/platform_data/cros_usbpd_notify.h
@@ -5157,6 +5162,20 @@ S: Supported
F: drivers/cpuidle/cpuidle-psci.h
F: drivers/cpuidle/cpuidle-psci-domain.c
+CPUIDLE DRIVER - DT IDLE PM DOMAIN
+M: Ulf Hansson <ulf.hansson@linaro.org>
+L: linux-pm@vger.kernel.org
+S: Supported
+F: drivers/cpuidle/dt_idle_genpd.c
+F: drivers/cpuidle/dt_idle_genpd.h
+
+CPUIDLE DRIVER - RISC-V SBI
+M: Anup Patel <anup@brainfault.org>
+L: linux-pm@vger.kernel.org
+L: linux-riscv@lists.infradead.org
+S: Maintained
+F: drivers/cpuidle/cpuidle-riscv-sbi.c
+
CRAMFS FILESYSTEM
M: Nicolas Pitre <nico@fluxnic.net>
S: Maintained
@@ -6038,6 +6057,7 @@ F: drivers/scsi/dpt/
DRBD DRIVER
M: Philipp Reisner <philipp.reisner@linbit.com>
M: Lars Ellenberg <lars.ellenberg@linbit.com>
+M: Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
L: drbd-dev@lists.linbit.com
S: Supported
W: http://www.drbd.org
@@ -9517,6 +9537,12 @@ M: Stanislaw Gruszka <stf_xl@wp.pl>
S: Maintained
F: drivers/usb/atm/ueagle-atm.c
+IMAGIS TOUCHSCREEN DRIVER
+M: Markuss Broks <markuss.broks@gmail.com>
+S: Maintained
+F: Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
+F: drivers/input/touchscreen/imagis.c
+
IMGTEC ASCII LCD DRIVER
M: Paul Burton <paulburton@kernel.org>
S: Maintained
@@ -10648,9 +10674,9 @@ F: tools/testing/selftests/
KERNEL SMB3 SERVER (KSMBD)
M: Namjae Jeon <linkinjeon@kernel.org>
-M: Sergey Senozhatsky <senozhatsky@chromium.org>
M: Steve French <sfrench@samba.org>
M: Hyunchul Lee <hyc.lee@gmail.com>
+R: Sergey Senozhatsky <senozhatsky@chromium.org>
L: linux-cifs@vger.kernel.org
S: Maintained
T: git git://git.samba.org/ksmbd.git
@@ -14622,6 +14648,12 @@ L: op-tee@lists.trustedfirmware.org
S: Maintained
F: drivers/char/hw_random/optee-rng.c
+OP-TEE RTC DRIVER
+M: Clément Léger <clement.leger@bootlin.com>
+L: linux-rtc@vger.kernel.org
+S: Maintained
+F: drivers/rtc/rtc-optee.c
+
OPA-VNIC DRIVER
M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
M: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
diff --git a/arch/alpha/include/asm/user.h b/arch/alpha/include/asm/user.h
index 3df37492c7b7..c9f525a6aab8 100644
--- a/arch/alpha/include/asm/user.h
+++ b/arch/alpha/include/asm/user.h
@@ -45,10 +45,4 @@ struct user {
char u_comm[32]; /* user command name */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_DATA_START_ADDR (u.start_data)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-
#endif /* _ALPHA_USER_H */
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index 827e887afbda..13e1bdb3ddbf 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -134,9 +134,9 @@
reg = <0xb4100000 0x1000>;
interrupts = <0 105 0x4>;
status = "disabled";
- dmas = <&dwdma0 12 0 1>,
- <&dwdma0 13 1 0>;
- dma-names = "tx", "rx";
+ dmas = <&dwdma0 13 0 1>,
+ <&dwdma0 12 1 0>;
+ dma-names = "rx", "tx";
};
thermal@e07008c4 {
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index c87b881b2c8b..913553367687 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -284,9 +284,9 @@
#size-cells = <0>;
interrupts = <0 31 0x4>;
status = "disabled";
- dmas = <&dwdma0 4 0 0>,
- <&dwdma0 5 0 0>;
- dma-names = "tx", "rx";
+ dmas = <&dwdma0 5 0 0>,
+ <&dwdma0 4 0 0>;
+ dma-names = "rx", "tx";
};
rtc@e0580000 {
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index c799a3c49342..167d44b550f4 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -77,10 +77,6 @@ struct user{
struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
/* the FP registers. */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
/*
* User specific VFP registers. If only VFPv2 is present, registers 16 to 31
diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
index 0659ab4cb0af..11677fc2968f 100644
--- a/arch/arm/mach-omap2/omap-secure.c
+++ b/arch/arm/mach-omap2/omap-secure.c
@@ -59,8 +59,13 @@ static void __init omap_optee_init_check(void)
u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
u32 arg3, u32 arg4)
{
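+ /*
+  * A static per-CPU slot is used instead of a stack array, presumably
+  * because the __pa() conversion below is not valid for vmalloc'd
+  * stack addresses (CONFIG_VMAP_STACK); get_cpu() pins the slot by
+  * disabling preemption for the duration of the secure call.
+  */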
+ static u32 buf[NR_CPUS][5];
+ u32 *param;
+ int cpu;
u32 ret;
- u32 param[5];
+
+ cpu = get_cpu();
+ param = buf[cpu];
param[0] = nargs;
param[1] = arg1;
@@ -76,6 +81,8 @@ u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
outer_clean_range(__pa(param), __pa(param + 5));
ret = omap_smc2(idx, flag, __pa(param));
+ put_cpu();
+
return ret;
}
@@ -119,8 +126,8 @@ phys_addr_t omap_secure_ram_mempool_base(void)
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
u32 omap3_save_secure_ram(void __iomem *addr, int size)
{
+ static u32 param[5];
u32 ret;
- u32 param[5];
if (size != OMAP3_SAVE_SECURE_RAM_SZ)
return OMAP3_SAVE_SECURE_RAM_SZ;
@@ -153,8 +160,8 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size)
u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
+ static u32 param[5];
u32 ret;
- u32 param[5];
param[0] = nargs+1; /* RX-51 needs number of arguments + 1 */
param[1] = arg1;
diff --git a/arch/arm64/boot/dts/amd/Makefile b/arch/arm64/boot/dts/amd/Makefile
index 6a6093064a32..68103a8b0ef5 100644
--- a/arch/arm64/boot/dts/amd/Makefile
+++ b/arch/arm64/boot/dts/amd/Makefile
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb \
- amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \
- husky.dtb
+dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
index 8e341be9a399..c290d1ce2b03 100644
--- a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
+++ b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
@@ -9,6 +9,7 @@
/dts-v1/;
/include/ "amd-seattle-soc.dtsi"
+/include/ "amd-seattle-cpus.dtsi"
/ {
model = "AMD Seattle (Rev.B0) Development Board (Overdrive)";
@@ -36,14 +37,6 @@
status = "ok";
};
-&gpio2 {
- status = "ok";
-};
-
-&gpio3 {
- status = "ok";
-};
-
&gpio4 {
status = "ok";
};
@@ -79,10 +72,6 @@
};
};
-&ipmi_kcs {
- status = "ok";
-};
-
&smb0 {
/include/ "amd-seattle-xgbe-b.dtsi"
};
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
index 92cef05c6b74..e0926f6bb7c3 100644
--- a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
+++ b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
@@ -9,6 +9,7 @@
/dts-v1/;
/include/ "amd-seattle-soc.dtsi"
+/include/ "amd-seattle-cpus.dtsi"
/ {
model = "AMD Seattle (Rev.B1) Development Board (Overdrive)";
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive.dts b/arch/arm64/boot/dts/amd/amd-overdrive.dts
deleted file mode 100644
index 41b3a6c0993d..000000000000
--- a/arch/arm64/boot/dts/amd/amd-overdrive.dts
+++ /dev/null
@@ -1,66 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DTS file for AMD Seattle Overdrive Development Board
- *
- * Copyright (C) 2014 Advanced Micro Devices, Inc.
- */
-
-/dts-v1/;
-
-/include/ "amd-seattle-soc.dtsi"
-
-/ {
- model = "AMD Seattle Development Board (Overdrive)";
- compatible = "amd,seattle-overdrive", "amd,seattle";
-
- chosen {
- stdout-path = &serial0;
- };
-};
-
-&ccp0 {
- status = "ok";
-};
-
-&gpio0 {
- status = "ok";
-};
-
-&gpio1 {
- status = "ok";
-};
-
-&i2c0 {
- status = "ok";
-};
-
-&pcie0 {
- status = "ok";
-};
-
-&spi0 {
- status = "ok";
-};
-
-&spi1 {
- status = "ok";
- sdcard0: sdcard@0 {
- compatible = "mmc-spi-slot";
- reg = <0>;
- spi-max-frequency = <20000000>;
- voltage-ranges = <3200 3400>;
- gpios = <&gpio0 7 0>;
- interrupt-parent = <&gpio0>;
- interrupts = <7 3>;
- pl022,hierarchy = <0>;
- pl022,interface = <0>;
- pl022,com-mode = <0x0>;
- pl022,rx-level-trig = <0>;
- pl022,tx-level-trig = <0>;
- };
-};
-
-&v2m0 {
- arm,msi-base-spi = <64>;
- arm,msi-num-spis = <256>;
-};
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi
new file mode 100644
index 000000000000..93688a0b6820
--- /dev/null
+++ b/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+ cpus {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+ cluster1 {
+ core0 {
+ cpu = <&CPU2>;
+ };
+ core1 {
+ cpu = <&CPU3>;
+ };
+ };
+ cluster2 {
+ core0 {
+ cpu = <&CPU4>;
+ };
+ core1 {
+ cpu = <&CPU5>;
+ };
+ };
+ cluster3 {
+ core0 {
+ cpu = <&CPU6>;
+ };
+ core1 {
+ cpu = <&CPU7>;
+ };
+ };
+ };
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0>;
+ enable-method = "psci";
+
+ i-cache-size = <0xC000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ l2-cache = <&L2_0>;
+
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x1>;
+ enable-method = "psci";
+
+ i-cache-size = <0xC000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ l2-cache = <&L2_0>;
+ };
+
+ CPU2: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x100>;
+ enable-method = "psci";
+
+ i-cache-size = <0xC000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ l2-cache = <&L2_1>;
+ };
+
+ CPU3: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x101>;
+ enable-method = "psci";
+
+ i-cache-size = <0xC000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ l2-cache = <&L2_1>;
+ };
+
+ CPU4: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x200>;
+ enable-method = "psci";
+
+ i-cache-size = <0xC000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ l2-cache = <&L2_2>;
+ };
+
+ CPU5: cpu@201 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x201>;
+ enable-method = "psci";
+
+ i-cache-size = <0xC000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ l2-cache = <&L2_2>;
+ };
+
+ CPU6: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x300>;
+ enable-method = "psci";
+
+ i-cache-size = <0xC000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ l2-cache = <&L2_3>;
+ };
+
+ CPU7: cpu@301 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x301>;
+ enable-method = "psci";
+
+ i-cache-size = <0xC000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ l2-cache = <&L2_3>;
+ };
+ };
+
+ L2_0: l2-cache0 {
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ cache-unified;
+ next-level-cache = <&L3>;
+ };
+
+ L2_1: l2-cache1 {
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ cache-unified;
+ next-level-cache = <&L3>;
+ };
+
+ L2_2: l2-cache2 {
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ cache-unified;
+ next-level-cache = <&L3>;
+ };
+
+ L2_3: l2-cache3 {
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ cache-unified;
+ next-level-cache = <&L3>;
+ };
+
+ L3: l3-cache {
+ cache-level = <3>;
+ cache-size = <0x800000>;
+ cache-line-size = <64>;
+ cache-sets = <8192>;
+ cache-unified;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a57-pmu";
+ interrupts = <0x0 0x7 0x4>,
+ <0x0 0x8 0x4>,
+ <0x0 0x9 0x4>,
+ <0x0 0xa 0x4>,
+ <0x0 0xb 0x4>,
+ <0x0 0xc 0x4>,
+ <0x0 0xd 0x4>,
+ <0x0 0xe 0x4>;
+ interrupt-affinity = <&CPU0>,
+ <&CPU1>,
+ <&CPU2>,
+ <&CPU3>,
+ <&CPU4>,
+ <&CPU5>,
+ <&CPU6>,
+ <&CPU7>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
index b664e7af74eb..690020589d41 100644
--- a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
+++ b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
@@ -38,18 +38,6 @@
<1 10 0xff04>;
};
- pmu {
- compatible = "arm,armv8-pmuv3";
- interrupts = <0 7 4>,
- <0 8 4>,
- <0 9 4>,
- <0 10 4>,
- <0 11 4>,
- <0 12 4>,
- <0 13 4>,
- <0 14 4>;
- };
-
smb0: smb {
compatible = "simple-bus";
#address-cells = <2>;
@@ -70,6 +58,7 @@
reg = <0 0xe0300000 0 0xf0000>;
interrupts = <0 355 4>;
clocks = <&sataclk_333mhz>;
+ iommus = <&sata0_smmu 0x0 0x1f>;
dma-coherent;
};
@@ -80,6 +69,27 @@
reg = <0 0xe0d00000 0 0xf0000>;
interrupts = <0 354 4>;
clocks = <&sataclk_333mhz>;
+ iommus = <&sata1_smmu 0x0e>,
+ <&sata1_smmu 0x0f>,
+ <&sata1_smmu 0x1e>;
+ dma-coherent;
+ };
+
+ sata0_smmu: iommu@e0200000 {
+ compatible = "arm,mmu-401";
+ reg = <0 0xe0200000 0 0x10000>;
+ #global-interrupts = <1>;
+ interrupts = <0 332 4>, <0 332 4>;
+ #iommu-cells = <2>;
+ dma-coherent;
+ };
+
+ sata1_smmu: iommu@e0c00000 {
+ compatible = "arm,mmu-401";
+ reg = <0 0xe0c00000 0 0x10000>;
+ #global-interrupts = <1>;
+ interrupts = <0 331 4>, <0 331 4>;
+ #iommu-cells = <1>;
dma-coherent;
};
@@ -201,6 +211,10 @@
reg = <0 0xe0100000 0 0x10000>;
interrupts = <0 3 4>;
dma-coherent;
+ iommus = <&sata1_smmu 0x00>,
+ <&sata1_smmu 0x02>,
+ <&sata1_smmu 0x40>,
+ <&sata1_smmu 0x42>;
};
pcie0: pcie@f0000000 {
@@ -213,12 +227,22 @@
msi-parent = <&v2m0>;
reg = <0 0xf0000000 0 0x10000000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
interrupt-map =
- <0x1000 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x120 0x1>,
- <0x1000 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x121 0x1>,
- <0x1000 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x122 0x1>,
- <0x1000 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>;
+ <0x1100 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x120 0x1>,
+ <0x1100 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x121 0x1>,
+ <0x1100 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x122 0x1>,
+ <0x1100 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>,
+
+ <0x1200 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x124 0x1>,
+ <0x1200 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x125 0x1>,
+ <0x1200 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x126 0x1>,
+ <0x1200 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x127 0x1>,
+
+ <0x1300 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x128 0x1>,
+ <0x1300 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x129 0x1>,
+ <0x1300 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x12a 0x1>,
+ <0x1300 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x12b 0x1>;
dma-coherent;
dma-ranges = <0x43000000 0x0 0x0 0x0 0x0 0x100 0x0>;
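The widened interrupt-map-mask is the key to the larger map above: the high cell of a PCI unit address packs bus[23:16], device[15:11] and function[10:8], so moving the mask from 0xf800 to 0xff00 makes the lookup function-granular and lets each function of device 2 route to its own GIC SPI. A small decoding sketch (illustrative C, not kernel code):

#include <stdio.h>

/* Decode the high cell of a PCI unit address as used in
 * interrupt-map entries: bus[23:16] | device[15:11] | function[10:8]. */
static void decode(unsigned int phys_hi)
{
        unsigned int dev = (phys_hi >> 11) & 0x1f;
        unsigned int fn  = (phys_hi >> 8) & 0x7;

        printf("0x%04x -> device %u, function %u\n", phys_hi, dev, fn);
}

int main(void)
{
        decode(0x1100); /* device 2, function 1 */
        decode(0x1200); /* device 2, function 2 */
        decode(0x1300); /* device 2, function 3 */
        return 0;
}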
@@ -227,8 +251,18 @@
<0x01000000 0x00 0x00000000 0x00 0xefff0000 0x00 0x00010000>,
/* 32-bit MMIO (size=2G) */
<0x02000000 0x00 0x40000000 0x00 0x40000000 0x00 0x80000000>,
- /* 64-bit MMIO (size= 124G) */
+ /* 64-bit MMIO (size= 508G) */
<0x03000000 0x01 0x00000000 0x01 0x00000000 0x7f 0x00000000>;
+ iommu-map = <0x0 &pcie_smmu 0x0 0x10000>;
+ };
+
+ pcie_smmu: iommu@e0a00000 {
+ compatible = "arm,mmu-401";
+ reg = <0 0xe0a00000 0 0x10000>;
+ #global-interrupts = <1>;
+ interrupts = <0 333 4>, <0 333 4>;
+ #iommu-cells = <1>;
+ dma-coherent;
};
/* Perf CCN504 PMU */
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
index d97498361ce3..9259e547e2e8 100644
--- a/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
+++ b/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
@@ -55,7 +55,7 @@
clocks = <&xgmacclk0_dma_250mhz>, <&xgmacclk0_ptp_250mhz>;
clock-names = "dma_clk", "ptp_clk";
phy-mode = "xgmii";
- #stream-id-cells = <16>;
+ iommus = <&xgmac0_smmu 0x00 0x17>; /* 0-7, 16-23 */
dma-coherent;
};
@@ -81,11 +81,11 @@
clocks = <&xgmacclk1_dma_250mhz>, <&xgmacclk1_ptp_250mhz>;
clock-names = "dma_clk", "ptp_clk";
phy-mode = "xgmii";
- #stream-id-cells = <16>;
+ iommus = <&xgmac1_smmu 0x00 0x17>; /* 0-7, 16-23 */
dma-coherent;
};
- xgmac0_smmu: smmu@e0600000 {
+ xgmac0_smmu: iommu@e0600000 {
compatible = "arm,mmu-401";
reg = <0 0xe0600000 0 0x10000>;
#global-interrupts = <1>;
@@ -94,14 +94,11 @@
*/
<0 336 4>,
<0 336 4>;
-
- mmu-masters = <&xgmac0
- 0 1 2 3 4 5 6 7
- 16 17 18 19 20 21 22 23
- >;
+ #iommu-cells = <2>;
+ dma-coherent;
};
- xgmac1_smmu: smmu@e0800000 {
+ xgmac1_smmu: iommu@e0800000 {
compatible = "arm,mmu-401";
reg = <0 0xe0800000 0 0x10000>;
#global-interrupts = <1>;
@@ -110,9 +107,6 @@
*/
<0 335 4>,
<0 335 4>;
-
- mmu-masters = <&xgmac1
- 0 1 2 3 4 5 6 7
- 16 17 18 19 20 21 22 23
- >;
+ #iommu-cells = <2>;
+ dma-coherent;
};
diff --git a/arch/arm64/boot/dts/amd/husky.dts b/arch/arm64/boot/dts/amd/husky.dts
deleted file mode 100644
index 7acde34772cb..000000000000
--- a/arch/arm64/boot/dts/amd/husky.dts
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DTS file for AMD/Linaro 96Boards Enterprise Edition Server (Husky) Board
- * Note: Based-on AMD Seattle Rev.B0
- *
- * Copyright (C) 2015 Advanced Micro Devices, Inc.
- */
-
-/dts-v1/;
-
-/include/ "amd-seattle-soc.dtsi"
-
-/ {
- model = "Linaro 96Boards Enterprise Edition Server (Husky) Board";
- compatible = "amd,seattle-overdrive", "amd,seattle";
-
- chosen {
- stdout-path = &serial0;
- };
-
- psci {
- compatible = "arm,psci-0.2";
- method = "smc";
- };
-};
-
-&ccp0 {
- status = "ok";
- amd,zlib-support = <1>;
-};
-
-/**
- * NOTE: In Rev.B, gpio0 is reserved.
- */
-&gpio1 {
- status = "ok";
-};
-
-&gpio2 {
- status = "ok";
-};
-
-&gpio3 {
- status = "ok";
-};
-
-&gpio4 {
- status = "ok";
-};
-
-&i2c0 {
- status = "ok";
-};
-
-&i2c1 {
- status = "ok";
-};
-
-&pcie0 {
- status = "ok";
-};
-
-&spi0 {
- status = "ok";
-};
-
-&spi1 {
- status = "ok";
- sdcard0: sdcard@0 {
- compatible = "mmc-spi-slot";
- reg = <0>;
- spi-max-frequency = <20000000>;
- voltage-ranges = <3200 3400>;
- pl022,hierarchy = <0>;
- pl022,interface = <0>;
- pl022,com-mode = <0x0>;
- pl022,rx-level-trig = <0>;
- pl022,tx-level-trig = <0>;
- };
-};
-
-&smb0 {
- /include/ "amd-seattle-xgbe-b.dtsi"
-};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 01b01e320411..35d1939e690b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -536,9 +536,9 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
- dmas = <&edma0 1 39>,
- <&edma0 1 38>;
- dma-names = "tx", "rx";
+ dmas = <&edma0 1 38>,
+ <&edma0 1 39>;
+ dma-names = "rx", "tx";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 687fea6d8afa..4e7bd04d9798 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -499,9 +499,9 @@
interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
- dmas = <&edma0 1 39>,
- <&edma0 1 38>;
- dma-names = "tx", "rx";
+ dmas = <&edma0 1 38>,
+ <&edma0 1 39>;
+ dma-names = "rx", "tx";
status = "disabled";
};
diff --git a/arch/h8300/include/asm/user.h b/arch/h8300/include/asm/user.h
index 2298909f24c6..161653d84b34 100644
--- a/arch/h8300/include/asm/user.h
+++ b/arch/h8300/include/asm/user.h
@@ -67,9 +67,5 @@ struct user {
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
#endif
diff --git a/arch/ia64/include/asm/user.h b/arch/ia64/include/asm/user.h
index 0ba486651b7c..ec03d3ab8715 100644
--- a/arch/ia64/include/asm/user.h
+++ b/arch/ia64/include/asm/user.h
@@ -50,10 +50,4 @@ struct user {
char u_comm[32]; /* user command name */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_DATA_START_ADDR (u.start_data)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-
#endif /* _ASM_IA64_USER_H */
diff --git a/arch/m68k/include/asm/user.h b/arch/m68k/include/asm/user.h
index 509d555977c8..61413bff613a 100644
--- a/arch/m68k/include/asm/user.h
+++ b/arch/m68k/include/asm/user.h
@@ -79,9 +79,5 @@ struct user{
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
};
-#define NBPG 4096
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
#endif
diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c
index 0a03529cf317..3e4f5ba104f8 100644
--- a/arch/mips/crypto/crc32-mips.c
+++ b/arch/mips/crypto/crc32-mips.c
@@ -28,7 +28,7 @@ enum crc_type {
};
#ifndef TOOLCHAIN_SUPPORTS_CRC
-#define _ASM_MACRO_CRC32(OP, SZ, TYPE) \
+#define _ASM_SET_CRC(OP, SZ, TYPE) \
_ASM_MACRO_3R(OP, rt, rs, rt2, \
".ifnc \\rt, \\rt2\n\t" \
".error \"invalid operands \\\"" #OP " \\rt,\\rs,\\rt2\\\"\"\n\t" \
@@ -37,30 +37,36 @@ _ASM_MACRO_3R(OP, rt, rs, rt2, \
((SZ) << 6) | ((TYPE) << 8)) \
_ASM_INSN32_IF_MM(0x00000030 | (__rs << 16) | (__rt << 21) | \
((SZ) << 14) | ((TYPE) << 3)))
-_ASM_MACRO_CRC32(crc32b, 0, 0);
-_ASM_MACRO_CRC32(crc32h, 1, 0);
-_ASM_MACRO_CRC32(crc32w, 2, 0);
-_ASM_MACRO_CRC32(crc32d, 3, 0);
-_ASM_MACRO_CRC32(crc32cb, 0, 1);
-_ASM_MACRO_CRC32(crc32ch, 1, 1);
-_ASM_MACRO_CRC32(crc32cw, 2, 1);
-_ASM_MACRO_CRC32(crc32cd, 3, 1);
-#define _ASM_SET_CRC ""
+#define _ASM_UNSET_CRC(op, SZ, TYPE) ".purgem " #op "\n\t"
#else /* !TOOLCHAIN_SUPPORTS_CRC */
-#define _ASM_SET_CRC ".set\tcrc\n\t"
+#define _ASM_SET_CRC(op, SZ, TYPE) ".set\tcrc\n\t"
+#define _ASM_UNSET_CRC(op, SZ, TYPE)
#endif
-#define _CRC32(crc, value, size, type) \
-do { \
- __asm__ __volatile__( \
- ".set push\n\t" \
- _ASM_SET_CRC \
- #type #size " %0, %1, %0\n\t" \
- ".set pop" \
- : "+r" (crc) \
- : "r" (value)); \
+#define __CRC32(crc, value, op, SZ, TYPE) \
+do { \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ _ASM_SET_CRC(op, SZ, TYPE) \
+ #op " %0, %1, %0\n\t" \
+ _ASM_UNSET_CRC(op, SZ, TYPE) \
+ ".set pop" \
+ : "+r" (crc) \
+ : "r" (value)); \
} while (0)
+#define _CRC32_crc32b(crc, value) __CRC32(crc, value, crc32b, 0, 0)
+#define _CRC32_crc32h(crc, value) __CRC32(crc, value, crc32h, 1, 0)
+#define _CRC32_crc32w(crc, value) __CRC32(crc, value, crc32w, 2, 0)
+#define _CRC32_crc32d(crc, value) __CRC32(crc, value, crc32d, 3, 0)
+#define _CRC32_crc32cb(crc, value) __CRC32(crc, value, crc32cb, 0, 1)
+#define _CRC32_crc32ch(crc, value) __CRC32(crc, value, crc32ch, 1, 1)
+#define _CRC32_crc32cw(crc, value) __CRC32(crc, value, crc32cw, 2, 1)
+#define _CRC32_crc32cd(crc, value) __CRC32(crc, value, crc32cd, 3, 1)
+
+#define _CRC32(crc, value, size, op) \
+ _CRC32_##op##size(crc, value)
+
#define CRC32(crc, value, size) \
_CRC32(crc, value, size, crc32)
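The rework turns the old single _ASM_SET_CRC string into per-instruction set/purge macro pairs, selected by a two-level token paste so each assembler macro exists only around its one use. A userspace sketch of the same ## dispatch (all names hypothetical):

#include <stdio.h>

/* Stand-ins for the per-instruction helpers. */
#define DO_opb(x)       ((x) + 1)
#define DO_opw(x)       ((x) + 4)

/* Two-level paste: OP(x, w) -> _OP(x, w, op) -> DO_opw(x). */
#define _OP(x, size, op)        DO_##op##size(x)
#define OP(x, size)             _OP(x, size, op)

int main(void)
{
        printf("%d\n", OP(10, w));      /* expands to DO_opw(10) == 14 */
        return 0;
}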
diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h
index 34d179ca020b..dd9d4b026e62 100644
--- a/arch/mips/include/asm/mach-rc32434/rb.h
+++ b/arch/mips/include/asm/mach-rc32434/rb.h
@@ -29,15 +29,6 @@
#define DEV3TC 0x01003C
#define BTCS 0x010040
#define BTCOMPARE 0x010044
-#define GPIOBASE 0x050000
-/* Offsets relative to GPIOBASE */
-#define GPIOFUNC 0x00
-#define GPIOCFG 0x04
-#define GPIOD 0x08
-#define GPIOILEVEL 0x0C
-#define GPIOISTAT 0x10
-#define GPIONMIEN 0x14
-#define IMASK6 0x38
#define LO_WPX (1 << 0)
#define LO_ALE (1 << 1)
#define LO_CLE (1 << 2)
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
index 64726c670ca6..5204fc6d6d50 100644
--- a/arch/mips/lantiq/falcon/sysctrl.c
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ if (!clk)
+ return;
clk->cl.dev_id = dev;
clk->cl.con_id = NULL;
clk->cl.clk = clk;
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
index 3d5683e75cf1..200fe9ff641d 100644
--- a/arch/mips/lantiq/xway/gptu.c
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ if (!clk)
+ return;
clk->cl.dev_id = dev_name(dev);
clk->cl.con_id = con;
clk->cl.clk = clk;
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 917fac1636b7..084f6caba5f2 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -315,6 +315,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ if (!clk)
+ return;
clk->cl.dev_id = dev;
clk->cl.con_id = con;
clk->cl.clk = clk;
@@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ if (!clk)
+ return;
clk->cl.dev_id = dev;
clk->cl.con_id = con;
clk->cl.clk = clk;
@@ -356,24 +360,28 @@ static void clkdev_add_pci(void)
struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
/* main pci clock */
- clk->cl.dev_id = "17000000.pci";
- clk->cl.con_id = NULL;
- clk->cl.clk = clk;
- clk->rate = CLOCK_33M;
- clk->rates = valid_pci_rates;
- clk->enable = pci_enable;
- clk->disable = pmu_disable;
- clk->module = 0;
- clk->bits = PMU_PCI;
- clkdev_add(&clk->cl);
+ if (clk) {
+ clk->cl.dev_id = "17000000.pci";
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->rate = CLOCK_33M;
+ clk->rates = valid_pci_rates;
+ clk->enable = pci_enable;
+ clk->disable = pmu_disable;
+ clk->module = 0;
+ clk->bits = PMU_PCI;
+ clkdev_add(&clk->cl);
+ }
/* use internal/external bus clock */
- clk_ext->cl.dev_id = "17000000.pci";
- clk_ext->cl.con_id = "external";
- clk_ext->cl.clk = clk_ext;
- clk_ext->enable = pci_ext_enable;
- clk_ext->disable = pci_ext_disable;
- clkdev_add(&clk_ext->cl);
+ if (clk_ext) {
+ clk_ext->cl.dev_id = "17000000.pci";
+ clk_ext->cl.con_id = "external";
+ clk_ext->cl.clk = clk_ext;
+ clk_ext->enable = pci_ext_enable;
+ clk_ext->disable = pci_ext_disable;
+ clkdev_add(&clk_ext->cl);
+ }
}
/* xway socs can generate clocks on gpio pins */
@@ -393,9 +401,15 @@ static void clkdev_add_clkout(void)
char *name;
name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+ if (!name)
+ continue;
sprintf(name, "clkout%d", i);
clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ if (!clk) {
+ kfree(name);
+ continue;
+ }
clk->cl.dev_id = "1f103000.cgu";
clk->cl.con_id = name;
clk->cl.clk = clk;
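These lantiq clkdev fixes all share one shape: check every kzalloc() and, on a partial failure, free what was already obtained before skipping the entry. A minimal userspace model of the pattern (illustrative names):

#include <stdlib.h>
#include <string.h>

struct entry { char *name; };

static struct entry *entry_create(const char *name)
{
        struct entry *e = calloc(1, sizeof(*e));

        if (!e)
                return NULL;
        e->name = strdup(name);
        if (!e->name) {
                free(e);        /* undo the first allocation */
                return NULL;
        }
        return e;
}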
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 94f02ada4082..29c21b9d42da 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -37,6 +37,16 @@
#include <asm/mach-rc32434/rb.h>
#include <asm/mach-rc32434/gpio.h>
+#define GPIOBASE 0x050000
+/* Offsets relative to GPIOBASE */
+#define GPIOFUNC 0x00
+#define GPIOCFG 0x04
+#define GPIOD 0x08
+#define GPIOILEVEL 0x0C
+#define GPIOISTAT 0x10
+#define GPIONMIEN 0x14
+#define IMASK6 0x38
+
struct rb532_gpio_chip {
struct gpio_chip chip;
void __iomem *regbase;
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
index dfc52f661ad0..38d12f417e48 100644
--- a/arch/mips/sgi-ip22/ip22-gio.c
+++ b/arch/mips/sgi-ip22/ip22-gio.c
@@ -363,6 +363,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
printk(KERN_INFO "GIO: slot %d : %s (id %x)\n",
slotno, name, id);
gio_dev = kzalloc(sizeof *gio_dev, GFP_KERNEL);
+ if (!gio_dev)
+ return;
gio_dev->name = name;
gio_dev->slotno = slotno;
gio_dev->id.id = id;
diff --git a/arch/powerpc/include/asm/user.h b/arch/powerpc/include/asm/user.h
index 99443b8594e7..7fae7e597ba4 100644
--- a/arch/powerpc/include/asm/user.h
+++ b/arch/powerpc/include/asm/user.h
@@ -44,9 +44,4 @@ struct user {
char u_comm[32]; /* user command name */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_DATA_START_ADDR (u.start_data)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
#endif /* _ASM_POWERPC_USER_H */
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index ea8ec8a960bd..00fd9c548f26 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -16,6 +16,7 @@ config RISCV
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_WX
@@ -47,6 +48,7 @@ config RISCV
select CLONE_BACKWARDS
select CLINT_TIMER if !MMU
select COMMON_CLK
+ select CPU_PM if CPU_IDLE
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY if SMP
select GENERIC_ATOMIC64 if !64BIT
@@ -533,4 +535,10 @@ source "kernel/power/Kconfig"
endmenu
+menu "CPU Power Management"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
source "arch/riscv/kvm/Kconfig"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index c112ab2a9052..34592d00dde8 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -36,6 +36,9 @@ config SOC_VIRT
select GOLDFISH
select RTC_DRV_GOLDFISH if RTC_CLASS
select SIFIVE_PLIC
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM && OF
+ select RISCV_SBI_CPUIDLE if CPU_IDLE
help
This enables support for QEMU Virt Machine.
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
index 984872f3d3a9..b9e30df127fe 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
@@ -203,6 +203,8 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
m25p,fast-read;
broken-flash-reset;
};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
index 7ba99b4da304..8d23401b0bbb 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
@@ -205,6 +205,8 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
m25p,fast-read;
broken-flash-reset;
};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
index be9b12c9b374..24fd83b43d9d 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
@@ -213,6 +213,8 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
m25p,fast-read;
broken-flash-reset;
};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
index 031c0c28f819..25341f38292a 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
@@ -178,6 +178,8 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
m25p,fast-read;
broken-flash-reset;
};
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 7cd10ded7bf8..30e3017f22bc 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_PROFILING=y
CONFIG_SOC_MICROCHIP_POLARFIRE=y
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
+CONFIG_PM=y
+CONFIG_CPU_IDLE=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
@@ -64,8 +67,6 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_HVC_RISCV_SBI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index 3f42ed87dde8..2438fa39f8ae 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -21,7 +21,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_AIO is not set
# CONFIG_IO_URING is not set
# CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
# CONFIG_KALLSYMS is not set
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index af64b95e88cc..9a133e63ae5b 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -13,7 +13,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_AIO is not set
# CONFIG_IO_URING is not set
# CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
# CONFIG_KALLSYMS is not set
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
index e1c9864b6237..5269fbb6b4fc 100644
--- a/arch/riscv/configs/nommu_virt_defconfig
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -19,7 +19,6 @@ CONFIG_EXPERT=y
# CONFIG_AIO is not set
# CONFIG_IO_URING is not set
# CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
# CONFIG_KALLSYMS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index e0e5c7c09ab8..7e5efdc3829d 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_PROFILING=y
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
+CONFIG_PM=y
+CONFIG_CPU_IDLE=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
@@ -62,8 +65,6 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_HVC_RISCV_SBI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 618d7c5af1a2..8c2549b16ac0 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -67,4 +67,30 @@
#error "Unexpected __SIZEOF_SHORT__"
#endif
+#ifdef __ASSEMBLY__
+
+/* Common assembly source macros */
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+ REG_L t0, _xip_fixup
+ add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+ la t0, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
+.endm
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_RISCV_ASM_H */
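Hoisting the XIP fixup macros into asm.h lets the new suspend entry code share them with head.S. The arithmetic is a plain rebase from the flash (link-time) view of .data to its runtime copy in RAM; a sketch with made-up addresses:

#include <stdio.h>

#define PHYS_RAM_BASE   0x80000000UL    /* assumed RAM base */
#define XIP_PHYS_ADDR   0x20000000UL    /* assumed flash base */
#define XIP_OFFSET      0x00800000UL    /* assumed .data offset */

int main(void)
{
        /* what _xip_fixup holds */
        unsigned long fixup = PHYS_RAM_BASE - XIP_PHYS_ADDR - XIP_OFFSET;
        /* a link-time .data address, as XIP_FIXUP_OFFSET sees it */
        unsigned long flash = XIP_PHYS_ADDR + XIP_OFFSET + 0x1000;

        printf("RAM copy: 0x%lx\n", flash + fixup);     /* 0x80001000 */
        return 0;
}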
diff --git a/arch/riscv/include/asm/cpuidle.h b/arch/riscv/include/asm/cpuidle.h
new file mode 100644
index 000000000000..71fdc607d4bc
--- /dev/null
+++ b/arch/riscv/include/asm/cpuidle.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Allwinner Ltd
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_CPUIDLE_H
+#define _ASM_RISCV_CPUIDLE_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+static inline void cpu_do_idle(void)
+{
+ /*
+ * Add mb() here to ensure that all
+ * IO/MEM accesses are completed prior
+ * to entering WFI.
+ */
+ mb();
+ wait_for_interrupt();
+}
+
+#endif
diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h
index 1de233d8e8de..21774d868c65 100644
--- a/arch/riscv/include/asm/current.h
+++ b/arch/riscv/include/asm/current.h
@@ -33,6 +33,8 @@ static __always_inline struct task_struct *get_current(void)
#define current get_current()
+register unsigned long current_stack_pointer __asm__("sp");
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_CURRENT_H */
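current_stack_pointer is a GCC/Clang global register variable, which is what the new ARCH_HAS_CURRENT_STACK_POINTER selection advertises: reading it costs a single register move and replaces the private sp_in_global copies scattered around. A minimal sketch:

/* Bind a C identifier to a fixed machine register (compiler
 * extension; the register name is target-specific). */
register unsigned long current_stack_pointer __asm__("sp");

unsigned long read_sp(void)
{
        return current_stack_pointer;   /* compiles to "mv a0, sp" */
}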
diff --git a/arch/riscv/include/asm/module.lds.h b/arch/riscv/include/asm/module.lds.h
index 4254ff2ff049..1075beae1ac6 100644
--- a/arch/riscv/include/asm/module.lds.h
+++ b/arch/riscv/include/asm/module.lds.h
@@ -2,8 +2,8 @@
/* Copyright (C) 2017 Andes Technology Corporation */
#ifdef CONFIG_MODULE_SECTIONS
SECTIONS {
- .plt (NOLOAD) : { BYTE(0) }
- .got (NOLOAD) : { BYTE(0) }
- .got.plt (NOLOAD) : { BYTE(0) }
+ .plt : { BYTE(0) }
+ .got : { BYTE(0) }
+ .got.plt : { BYTE(0) }
}
#endif
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
new file mode 100644
index 000000000000..8be391c2aecb
--- /dev/null
+++ b/arch/riscv/include/asm/suspend.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _ASM_RISCV_SUSPEND_H
+#define _ASM_RISCV_SUSPEND_H
+
+#include <asm/ptrace.h>
+
+struct suspend_context {
+ /* Saved and restored by low-level functions */
+ struct pt_regs regs;
+ /* Saved and restored by high-level functions */
+ unsigned long scratch;
+ unsigned long tvec;
+ unsigned long ie;
+#ifdef CONFIG_MMU
+ unsigned long satp;
+#endif
+};
+
+/* Low-level CPU suspend entry function */
+int __cpu_suspend_enter(struct suspend_context *context);
+
+/* High-level CPU suspend which will save context and call finish() */
+int cpu_suspend(unsigned long arg,
+ int (*finish)(unsigned long arg,
+ unsigned long entry,
+ unsigned long context));
+
+/* Low-level CPU resume entry function */
+int __cpu_resume_enter(unsigned long hartid, unsigned long context);
+
+#endif
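A sketch of the intended calling convention, with a hypothetical finisher (the real consumer is the SBI HSM cpuidle path): cpu_suspend() saves context on the stack and hands the finisher the physical resume entry point; a finisher that returns at all has failed.

#include <linux/errno.h>
#include <asm/suspend.h>

static int demo_finisher(unsigned long arg, unsigned long entry,
                         unsigned long context)
{
        /* A platform call (e.g. SBI HSM suspend) would go here,
         * passing 'entry' and 'context' to firmware.  Returning
         * means the suspend attempt did not happen. */
        return -EOPNOTSUPP;
}

static int demo_enter_idle(unsigned long state)
{
        return cpu_suspend(state, demo_finisher);
}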
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 60da0dcacf14..74d888c8d631 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -11,11 +11,17 @@
#include <asm/page.h>
#include <linux/const.h>
+#ifdef CONFIG_KASAN
+#define KASAN_STACK_ORDER 1
+#else
+#define KASAN_STACK_ORDER 0
+#endif
+
/* thread information allocation */
#ifdef CONFIG_64BIT
-#define THREAD_SIZE_ORDER (2)
+#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER)
#else
-#define THREAD_SIZE_ORDER (1)
+#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER)
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
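A worked sizing of the change, assuming 4 KiB pages: on 64-bit the order goes from 2 to 3 under KASAN, so stacks grow from 16 KiB to 32 KiB to absorb the instrumentation's deeper frames.

/* Illustrative check of the arithmetic, not kernel code. */
#define DEMO_PAGE_SIZE          4096
#define DEMO_KASAN_STACK_ORDER  1
#define DEMO_THREAD_SIZE_ORDER  (2 + DEMO_KASAN_STACK_ORDER)
#define DEMO_THREAD_SIZE        (DEMO_PAGE_SIZE << DEMO_THREAD_SIZE_ORDER)

_Static_assert(DEMO_THREAD_SIZE == 32768, "64-bit + KASAN: 32 KiB stacks");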
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index e0133d113216..87adbe47bc15 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -48,6 +48,8 @@ obj-$(CONFIG_RISCV_BOOT_SPINWAIT) += cpu_ops_spinwait.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
+obj-$(CONFIG_CPU_PM) += suspend_entry.o suspend.o
+
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index df0519a64eaf..df9444397908 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -13,6 +13,7 @@
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/cpu_ops_sbi.h>
+#include <asm/suspend.h>
void asm_offsets(void);
@@ -113,6 +114,8 @@ void asm_offsets(void)
OFFSET(PT_BADADDR, pt_regs, badaddr);
OFFSET(PT_CAUSE, pt_regs, cause);
+ OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
+
OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index d2a936195295..ccb617791e56 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -69,11 +69,11 @@ int riscv_of_parent_hartid(struct device_node *node)
.uprop = #UPROP, \
.isa_ext_id = EXTID, \
}
-/**
+/*
* Here are the ordering rules of extension naming defined by RISC-V
* specification :
* 1. All extensions should be separated from other multi-letter extensions
- * from other multi-letter extensions by an underscore.
+ * by an underscore.
* 2. The first letter following the 'Z' conventionally indicates the most
* closely related alphabetical extension category, IMAFDQLCBKJTPVH.
* If multiple 'Z' extensions are named, they should be ordered first
@@ -110,7 +110,7 @@ static void print_isa_ext(struct seq_file *f)
}
}
-/**
+/*
* These are the only valid base (single letter) ISA extensions as per the spec.
* It also specifies the canonical order in which they appear in the spec.
* Some of the extensions may just be placeholders for now (B, K, P, J).
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index 2e16f6732cdf..4f5a6f84e2a4 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -21,7 +21,7 @@ const struct cpu_operations cpu_ops_sbi;
* be invoked from multiple threads in parallel. Define a per cpu data
* to handle that.
*/
-DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
+static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
unsigned long priv)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index ec07f991866a..893b8bb69391 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -16,26 +16,6 @@
#include <asm/image.h>
#include "efi-header.S"
-#ifdef CONFIG_XIP_KERNEL
-.macro XIP_FIXUP_OFFSET reg
- REG_L t0, _xip_fixup
- add \reg, \reg, t0
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
- la t0, __data_loc
- REG_L t1, _xip_phys_offset
- sub \reg, \reg, t1
- add \reg, \reg, t0
-.endm
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
-#else
-.macro XIP_FIXUP_OFFSET reg
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-.endm
-#endif /* CONFIG_XIP_KERNEL */
-
__HEAD
ENTRY(_start)
/*
@@ -89,7 +69,8 @@ pe_head_start:
.align 2
#ifdef CONFIG_MMU
-relocate:
+ .global relocate_enable_mmu
+relocate_enable_mmu:
/* Relocate return address */
la a1, kernel_map
XIP_FIXUP_OFFSET a1
@@ -184,7 +165,7 @@ secondary_start_sbi:
/* Enable virtual memory and relocate to virtual address */
la a0, swapper_pg_dir
XIP_FIXUP_OFFSET a0
- call relocate
+ call relocate_enable_mmu
#endif
call setup_trap_vector
tail smp_callin
@@ -328,7 +309,7 @@ clear_bss_done:
#ifdef CONFIG_MMU
la a0, early_pg_dir
XIP_FIXUP_OFFSET a0
- call relocate
+ call relocate_enable_mmu
#endif /* CONFIG_MMU */
call setup_trap_vector
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 4a48287513c3..c29cef90d1dd 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -69,7 +69,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
return 0;
}
-static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location,
Elf_Addr v)
{
ptrdiff_t offset = (void *)v - (void *)location;
@@ -301,7 +301,7 @@ static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
[R_RISCV_64] = apply_r_riscv_64_rela,
[R_RISCV_BRANCH] = apply_r_riscv_branch_rela,
[R_RISCV_JAL] = apply_r_riscv_jal_rela,
- [R_RISCV_RVC_BRANCH] = apply_r_riscv_rcv_branch_rela,
+ [R_RISCV_RVC_BRANCH] = apply_r_riscv_rvc_branch_rela,
[R_RISCV_RVC_JUMP] = apply_r_riscv_rvc_jump_rela,
[R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela,
[R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela,
diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
index 55faa4991b87..3348a61de7d9 100644
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -68,7 +68,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
static bool fill_callchain(void *entry, unsigned long pc)
{
- return perf_callchain_store(entry, pc);
+ return perf_callchain_store(entry, pc) == 0;
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
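The one-line change above fixes an inverted convention: perf_callchain_store() returns 0 on success and non-zero once the entry buffer is full, while the arch_stack_walk() callback must return true to keep walking. A minimal illustration:

#include <stdbool.h>

static int store(unsigned long pc)
{
        return 0;       /* 0 == stored successfully */
}

/* buggy: returns false on every successful store, ending the
 * walk after a single frame */
static bool cb_old(unsigned long pc) { return store(pc); }

/* fixed: walks until the buffer really is full */
static bool cb_new(unsigned long pc) { return store(pc) == 0; }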
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 03ac3aa611f5..504b496787aa 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -23,6 +23,7 @@
#include <asm/string.h>
#include <asm/switch_to.h>
#include <asm/thread_info.h>
+#include <asm/cpuidle.h>
register unsigned long gp_in_global __asm__("gp");
@@ -37,7 +38,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
void arch_cpu_idle(void)
{
- wait_for_interrupt();
+ cpu_do_idle();
raw_local_irq_enable();
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 14d2b53ec322..08d11a53f39e 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -14,8 +14,6 @@
#include <asm/stacktrace.h>
-register unsigned long sp_in_global __asm__("sp");
-
#ifdef CONFIG_FRAME_POINTER
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
@@ -30,7 +28,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
fp = (unsigned long)__builtin_frame_address(0);
- sp = sp_in_global;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
@@ -78,7 +76,7 @@ void notrace walk_stackframe(struct task_struct *task,
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- sp = sp_in_global;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
new file mode 100644
index 000000000000..9ba24fb8cc93
--- /dev/null
+++ b/arch/riscv/kernel/suspend.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/ftrace.h>
+#include <asm/csr.h>
+#include <asm/suspend.h>
+
+static void suspend_save_csrs(struct suspend_context *context)
+{
+ context->scratch = csr_read(CSR_SCRATCH);
+ context->tvec = csr_read(CSR_TVEC);
+ context->ie = csr_read(CSR_IE);
+
+ /*
+ * No need to save/restore IP CSR (i.e. MIP or SIP) because:
+ *
+ * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
+ * external devices (such as interrupt controller, timer, etc).
+ * 2. For MMU (S-mode) kernel, the bits in SIP are set by
+ * M-mode firmware and external devices (such as interrupt
+ * controller, etc).
+ */
+
+#ifdef CONFIG_MMU
+ context->satp = csr_read(CSR_SATP);
+#endif
+}
+
+static void suspend_restore_csrs(struct suspend_context *context)
+{
+ csr_write(CSR_SCRATCH, context->scratch);
+ csr_write(CSR_TVEC, context->tvec);
+ csr_write(CSR_IE, context->ie);
+
+#ifdef CONFIG_MMU
+ csr_write(CSR_SATP, context->satp);
+#endif
+}
+
+int cpu_suspend(unsigned long arg,
+ int (*finish)(unsigned long arg,
+ unsigned long entry,
+ unsigned long context))
+{
+ int rc = 0;
+ struct suspend_context context = { 0 };
+
+ /* Finisher should be non-NULL */
+ if (!finish)
+ return -EINVAL;
+
+ /* Save additional CSRs */
+ suspend_save_csrs(&context);
+
+ /*
+ * Function graph tracer state gets inconsistent when the kernel
+ * calls functions that never return (aka finishers), hence disable
+ * graph tracing during their execution.
+ */
+ pause_graph_tracing();
+
+ /* Save context on stack */
+ if (__cpu_suspend_enter(&context)) {
+ /* Call the finisher */
+ rc = finish(arg, __pa_symbol(__cpu_resume_enter),
+ (ulong)&context);
+
+ /*
+ * Should never reach here, unless the suspend finisher
+ * fails. Successful cpu_suspend() should return from
+ * __cpu_resume_enter()
+ */
+ if (!rc)
+ rc = -EOPNOTSUPP;
+ }
+
+ /* Enable function graph tracer */
+ unpause_graph_tracing();
+
+ /* Restore additional CSRs */
+ suspend_restore_csrs(&context);
+
+ return rc;
+}
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
new file mode 100644
index 000000000000..4b07b809a2b8
--- /dev/null
+++ b/arch/riscv/kernel/suspend_entry.S
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/csr.h>
+
+ .text
+ .altmacro
+ .option norelax
+
+ENTRY(__cpu_suspend_enter)
+ /* Save registers (except A0 and T0-T6) */
+ REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+ REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+ REG_S gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+ REG_S tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+ REG_S s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+ REG_S s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+ REG_S a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+ REG_S a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+ REG_S a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+ REG_S a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+ REG_S a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+ REG_S a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+ REG_S a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+ REG_S s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+ REG_S s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+ REG_S s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+ REG_S s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+ REG_S s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+ REG_S s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+ REG_S s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+ REG_S s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+ REG_S s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+ REG_S s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+ /* Save CSRs */
+ csrr t0, CSR_EPC
+ REG_S t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+ csrr t0, CSR_STATUS
+ REG_S t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+ csrr t0, CSR_TVAL
+ REG_S t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+ csrr t0, CSR_CAUSE
+ REG_S t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+
+ /* Return non-zero value */
+ li a0, 1
+
+ /* Return to C code */
+ ret
+END(__cpu_suspend_enter)
+
+ENTRY(__cpu_resume_enter)
+ /* Load the global pointer */
+ .option push
+ .option norelax
+ la gp, __global_pointer$
+ .option pop
+
+#ifdef CONFIG_MMU
+ /* Save A0 and A1 */
+ add t0, a0, zero
+ add t1, a1, zero
+
+ /* Enable MMU */
+ la a0, swapper_pg_dir
+ XIP_FIXUP_OFFSET a0
+ call relocate_enable_mmu
+
+ /* Restore A0 and A1 */
+ add a0, t0, zero
+ add a1, t1, zero
+#endif
+
+ /* Make A0 point to suspend context */
+ add a0, a1, zero
+
+ /* Restore CSRs */
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+ csrw CSR_EPC, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+ csrw CSR_STATUS, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+ csrw CSR_TVAL, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+ csrw CSR_CAUSE, t0
+
+ /* Restore registers (except A0 and T0-T6) */
+ REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+ REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+ REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+ REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+ REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+ REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+ REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+ REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+ REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+ REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+ REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+ REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+ REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+ REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+ REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+ REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+ REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+ REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+ REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+ REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+ REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+ REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+ REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+ /* Return zero value */
+ add a0, zero, zero
+
+ /* Return to C code */
+ ret
+END(__cpu_resume_enter)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9b80e8bed3f6..77b5a03de13a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -58,6 +58,7 @@ config S390
select ALTERNATE_USER_ADDRESS_SPACE
select ARCH_32BIT_USTAT_F_TINODE
select ARCH_BINFMT_ELF_STATE
+ select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
index 955d620db23e..bb3837d7387c 100644
--- a/arch/s390/include/asm/alternative-asm.h
+++ b/arch/s390/include/asm/alternative-asm.h
@@ -37,9 +37,15 @@
* a 2-byte nop if the size of the area is not divisible by 6.
*/
.macro alt_pad_fill bytes
- .fill ( \bytes ) / 6, 6, 0xc0040000
- .fill ( \bytes ) % 6 / 4, 4, 0x47000000
- .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700
+ .rept ( \bytes ) / 6
+ brcl 0,0
+ .endr
+ .rept ( \bytes ) % 6 / 4
+ nop
+ .endr
+ .rept ( \bytes ) % 6 % 4 / 2
+ nopr
+ .endr
.endm
/*
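A worked example of the padding split (an illustration, not kernel code): N bytes become N/6 six-byte brcl 0,0, then N%6/4 four-byte nop, then one two-byte nopr if two bytes remain.

#include <stdio.h>

static void pad(unsigned int n)
{
        printf("%2u bytes -> %u brcl, %u nop, %u nopr\n",
               n, n / 6, n % 6 / 4, n % 6 % 4 / 2);
}

int main(void)
{
        pad(12);        /* 2 brcl */
        pad(10);        /* 1 brcl, 1 nop */
        pad(8);         /* 1 brcl, 1 nopr */
        return 0;
}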
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index d3880ca764ee..3f2856ed6808 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -71,11 +71,18 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
".if " oldinstr_pad_len(num) " > 6\n" \
"\tjg " e_oldinstr_pad_end "f\n" \
"6620:\n" \
- "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+ "\t.rept (" oldinstr_pad_len(num) " - (6620b-662b)) / 2\n" \
+ "\tnopr\n" \
+ "\t.endr\n" \
".else\n" \
- "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
- "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
- "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
+ "\t.rept " oldinstr_pad_len(num) " / 6\n" \
+ "\tbrcl 0,0\n" \
+ "\t.endr\n" \
+ "\t.rept " oldinstr_pad_len(num) " %% 6 / 4\n" \
+ "\tnop\n" \
+ "\t.endr\n" \
+ "\t.rept " oldinstr_pad_len(num) " %% 6 %% 4 / 2\n" \
+ "\tnopr\n" \
+ "\t.endr\n" \
".endif\n"
#define OLDINSTR(oldinstr, num) \
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index ae75da592ccb..b515cfa62bd9 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -60,11 +60,11 @@ static inline bool ap_instructions_available(void)
unsigned long reg1 = 0;
asm volatile(
- " lgr 0,%[reg0]\n" /* qid into gr0 */
- " lghi 1,0\n" /* 0 into gr1 */
- " lghi 2,0\n" /* 0 into gr2 */
- " .long 0xb2af0000\n" /* PQAP(TAPQ) */
- "0: la %[reg1],1\n" /* 1 into reg1 */
+ " lgr 0,%[reg0]\n" /* qid into gr0 */
+ " lghi 1,0\n" /* 0 into gr1 */
+ " lghi 2,0\n" /* 0 into gr2 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */
+ "0: la %[reg1],1\n" /* 1 into reg1 */
"1:\n"
EX_TABLE(0b, 1b)
: [reg1] "+&d" (reg1)
@@ -86,11 +86,11 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
unsigned long reg2;
asm volatile(
- " lgr 0,%[qid]\n" /* qid into gr0 */
- " lghi 2,0\n" /* 0 into gr2 */
- " .long 0xb2af0000\n" /* PQAP(TAPQ) */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
- " lgr %[reg2],2\n" /* gr2 into reg2 */
+ " lgr 0,%[qid]\n" /* qid into gr0 */
+ " lghi 2,0\n" /* 0 into gr2 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg2],2\n" /* gr2 into reg2 */
: [reg1] "=&d" (reg1), [reg2] "=&d" (reg2)
: [qid] "d" (qid)
: "cc", "0", "1", "2");
@@ -128,9 +128,9 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
struct ap_queue_status reg1;
asm volatile(
- " lgr 0,%[reg0]\n" /* qid arg into gr0 */
- " .long 0xb2af0000\n" /* PQAP(RAPQ) */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1)
: [reg0] "d" (reg0)
: "cc", "0", "1");
@@ -149,9 +149,9 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
struct ap_queue_status reg1;
asm volatile(
- " lgr 0,%[reg0]\n" /* qid arg into gr0 */
- " .long 0xb2af0000\n" /* PQAP(ZAPQ) */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1)
: [reg0] "d" (reg0)
: "cc", "0", "1");
@@ -190,10 +190,10 @@ static inline int ap_qci(struct ap_config_info *config)
struct ap_config_info *reg2 = config;
asm volatile(
- " lgr 0,%[reg0]\n" /* QCI fc into gr0 */
- " lgr 2,%[reg2]\n" /* ptr to config into gr2 */
- " .long 0xb2af0000\n" /* PQAP(QCI) */
- "0: la %[reg1],0\n" /* good case, QCI fc available */
+ " lgr 0,%[reg0]\n" /* QCI fc into gr0 */
+ " lgr 2,%[reg2]\n" /* ptr to config into gr2 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(QCI) */
+ "0: la %[reg1],0\n" /* good case, QCI fc available */
"1:\n"
EX_TABLE(0b, 1b)
: [reg1] "+&d" (reg1)
@@ -246,11 +246,11 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
reg1.qirqctrl = qirqctrl;
asm volatile(
- " lgr 0,%[reg0]\n" /* qid param into gr0 */
- " lgr 1,%[reg1]\n" /* irq ctrl into gr1 */
- " lgr 2,%[reg2]\n" /* ni addr into gr2 */
- " .long 0xb2af0000\n" /* PQAP(AQIC) */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr 0,%[reg0]\n" /* qid param into gr0 */
+ " lgr 1,%[reg1]\n" /* irq ctrl into gr1 */
+ " lgr 2,%[reg2]\n" /* ni addr into gr2 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(AQIC) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
: [reg1] "+&d" (reg1)
: [reg0] "d" (reg0), [reg2] "d" (reg2)
: "cc", "0", "1", "2");
@@ -297,11 +297,11 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
reg1.value = apinfo->val;
asm volatile(
- " lgr 0,%[reg0]\n" /* qid param into gr0 */
- " lgr 1,%[reg1]\n" /* qact in info into gr1 */
- " .long 0xb2af0000\n" /* PQAP(QACT) */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
- " lgr %[reg2],2\n" /* qact out info into reg2 */
+ " lgr 0,%[reg0]\n" /* qid param into gr0 */
+ " lgr 1,%[reg1]\n" /* qact in info into gr1 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(QACT) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg2],2\n" /* qact out info into reg2 */
: [reg1] "+&d" (reg1), [reg2] "=&d" (reg2)
: [reg0] "d" (reg0)
: "cc", "0", "1", "2");
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index c800199a376b..82388da3f95f 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -74,8 +74,17 @@ static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
__ctl_load(reg, cr, cr);
}
-void smp_ctl_set_bit(int cr, int bit);
-void smp_ctl_clear_bit(int cr, int bit);
+void smp_ctl_set_clear_bit(int cr, int bit, bool set);
+
+static inline void ctl_set_bit(int cr, int bit)
+{
+ smp_ctl_set_clear_bit(cr, bit, true);
+}
+
+static inline void ctl_clear_bit(int cr, int bit)
+{
+ smp_ctl_set_clear_bit(cr, bit, false);
+}
union ctlreg0 {
unsigned long val;
@@ -130,8 +139,5 @@ union ctlreg15 {
};
};
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_CTL_REG_H */
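The two wrappers fold set and clear into one or/and update, mirroring what the smp.c side now does with parms.orval and parms.andval. Modelled in plain C (illustrative):

static unsigned long ctl_update(unsigned long reg, int bit, _Bool set)
{
        unsigned long orval  = set ? 1UL << bit : 0;
        unsigned long andval = set ? -1UL : ~(1UL << bit);

        return (reg & andval) | orval;  /* one expression, both directions */
}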
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 84ec63145325..eee8d96fb38e 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -319,11 +319,18 @@ extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs);
extern int memcpy_real(void *, unsigned long, size_t);
extern void memcpy_absolute(void *, void *, size_t);
-#define mem_assign_absolute(dest, val) do { \
- __typeof__(dest) __tmp = (val); \
- \
- BUILD_BUG_ON(sizeof(__tmp) != sizeof(val)); \
- memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
+#define put_abs_lowcore(member, x) do { \
+ unsigned long __abs_address = offsetof(struct lowcore, member); \
+ __typeof__(((struct lowcore *)0)->member) __tmp = (x); \
+ \
+ memcpy_absolute(__va(__abs_address), &__tmp, sizeof(__tmp)); \
+} while (0)
+
+#define get_abs_lowcore(x, member) do { \
+ unsigned long __abs_address = offsetof(struct lowcore, member); \
+ __typeof__(((struct lowcore *)0)->member) *__ptr = &(x); \
+ \
+ memcpy_absolute(__ptr, __va(__abs_address), sizeof(*__ptr)); \
} while (0)
extern int s390_isolate_bp(void);
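The new accessors derive both the absolute address (via offsetof) and the copy size from the lowcore member itself, so the old BUILD_BUG_ON size check becomes unnecessary and callers cannot pass a mismatched destination. A userspace model of the idea (illustrative):

#include <stddef.h>
#include <string.h>

struct demo_lowcore { unsigned long restart_stack; unsigned long restart_fn; };

/* stands in for the absolute-zero lowcore copy */
static char abs_zero[sizeof(struct demo_lowcore)];

#define demo_put_abs(member, x) do {                                    \
        __typeof__(((struct demo_lowcore *)0)->member) __tmp = (x);     \
        memcpy(abs_zero + offsetof(struct demo_lowcore, member),        \
               &__tmp, sizeof(__tmp));                                  \
} while (0)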
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 888a2f1c9ee3..24a54443c865 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -78,7 +78,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(int, lp->lock);
asm_inline volatile(
- ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
+ ALTERNATIVE("", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
" sth %1,%0\n"
: "=R" (((unsigned short *) &lp->lock)[1])
: "d" (0) : "cc", "memory");
diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h
index ad2c996e7e93..fde7e6b1df48 100644
--- a/arch/s390/include/asm/syscall_wrapper.h
+++ b/arch/s390/include/asm/syscall_wrapper.h
@@ -162,4 +162,4 @@
__diag_pop(); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
-#endif /* _ASM_X86_SYSCALL_WRAPPER_H */
+#endif /* _ASM_S390_SYSCALL_WRAPPER_H */
diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
index 5ebf534ef753..0bf06f1682d8 100644
--- a/arch/s390/include/asm/unwind.h
+++ b/arch/s390/include/asm/unwind.h
@@ -4,6 +4,8 @@
#include <linux/sched.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
+#include <linux/llist.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
@@ -36,10 +38,21 @@ struct unwind_state {
struct pt_regs *regs;
unsigned long sp, ip;
int graph_idx;
+ struct llist_node *kr_cur;
bool reliable;
bool error;
};
+/* Recover the return address modified by kretprobe and ftrace_graph. */
+static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state,
+ unsigned long ip)
+{
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
+ if (is_kretprobe_trampoline(ip))
+ ip = kretprobe_find_ret_addr(state->task, (void *)state->sp, &state->kr_cur);
+ return ip;
+}
+
void __unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs, unsigned long first_frame);
bool unwind_next_frame(struct unwind_state *state);
diff --git a/arch/s390/include/asm/user.h b/arch/s390/include/asm/user.h
index 0ca572ced21b..8e8aaf48582e 100644
--- a/arch/s390/include/asm/user.h
+++ b/arch/s390/include/asm/user.h
@@ -67,9 +67,5 @@ struct user {
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
#endif /* _S390_USER_H */
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index a601a518b569..59b69c8ab5e1 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -121,22 +121,22 @@ _LPP_OFFSET = __LC_LPP
.endm
.macro BPOFF
- ALTERNATIVE "", ".long 0xb2e8c000", 82
+ ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,12,0", 82
.endm
.macro BPON
- ALTERNATIVE "", ".long 0xb2e8d000", 82
+ ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,13,0", 82
.endm
.macro BPENTER tif_ptr,tif_mask
- ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
+ ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
"", 82
.endm
.macro BPEXIT tif_ptr,tif_mask
TSTMSK \tif_ptr,\tif_mask
- ALTERNATIVE "jz .+8; .long 0xb2e8c000", \
- "jnz .+8; .long 0xb2e8d000", 82
+ ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
+ "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
.endm
/*
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 28ae7df26c4a..1cc85b8ff42e 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1646,8 +1646,8 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
csum = (__force unsigned int)
csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
- mem_assign_absolute(S390_lowcore.ipib, ipib);
- mem_assign_absolute(S390_lowcore.ipib_checksum, csum);
+ put_abs_lowcore(ipib, ipib);
+ put_abs_lowcore(ipib_checksum, csum);
dump_run(trigger);
}
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index e32c14fd1282..0032bdbe8e3f 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -284,11 +284,11 @@ NOKPROBE_SYMBOL(pop_kprobe);
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
- ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
- ri->fp = NULL;
+ ri->ret_addr = (kprobe_opcode_t *)regs->gprs[14];
+ ri->fp = (void *)regs->gprs[15];
/* Replace the return addr with trampoline addr */
- regs->gprs[14] = (unsigned long) &__kretprobe_trampoline;
+ regs->gprs[14] = (unsigned long)&__kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
@@ -385,7 +385,7 @@ NOKPROBE_SYMBOL(arch_kretprobe_fixup_return);
*/
void trampoline_probe_handler(struct pt_regs *regs)
{
- kretprobe_trampoline_handler(regs, NULL);
+ kretprobe_trampoline_handler(regs, (void *)regs->gprs[15]);
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 088d57a3083f..b2ef014a9287 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -226,7 +226,7 @@ void arch_crash_save_vmcoreinfo(void)
vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
- mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
+ put_abs_lowcore(vmcore_info, paddr_vmcoreinfo_note());
}
void machine_shutdown(void)
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index 6b5b64e67eee..1acc2e05d70f 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -63,7 +63,7 @@ void __init os_info_init(void)
os_info.version_minor = OS_INFO_VERSION_MINOR;
os_info.magic = OS_INFO_MAGIC;
os_info.csum = os_info_csum(&os_info);
- mem_assign_absolute(S390_lowcore.os_info, __pa(ptr));
+ put_abs_lowcore(os_info, __pa(ptr));
}
#ifdef CONFIG_CRASH_DUMP
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 84e23fcc1106..d860ac300919 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -481,11 +481,11 @@ static void __init setup_lowcore_dat_off(void)
lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
/* Setup absolute zero lowcore */
- mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
- mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
- mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
- mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
- mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
+ put_abs_lowcore(restart_stack, lc->restart_stack);
+ put_abs_lowcore(restart_fn, lc->restart_fn);
+ put_abs_lowcore(restart_data, lc->restart_data);
+ put_abs_lowcore(restart_source, lc->restart_source);
+ put_abs_lowcore(restart_psw, lc->restart_psw);
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
@@ -501,6 +501,7 @@ static void __init setup_lowcore_dat_off(void)
static void __init setup_lowcore_dat_on(void)
{
struct lowcore *lc = lowcore_ptr[0];
+ int cr;
__ctl_clear_bit(0, 28);
S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
@@ -509,10 +510,10 @@ static void __init setup_lowcore_dat_on(void)
S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
__ctl_store(S390_lowcore.cregs_save_area, 0, 15);
__ctl_set_bit(0, 28);
- mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
- mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
- memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
- sizeof(S390_lowcore.cregs_save_area));
+ put_abs_lowcore(restart_flags, RESTART_FLAG_CTLREGS);
+ put_abs_lowcore(program_new_psw, lc->program_new_psw);
+ for (cr = 0; cr < ARRAY_SIZE(lc->cregs_save_area); cr++)
+ put_abs_lowcore(cregs_save_area[cr], lc->cregs_save_area[cr]);
}
static struct resource code_resource = {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 127da1850b06..30c91d565933 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -213,7 +213,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
if (nmi_alloc_mcesa(&lc->mcesad))
goto out;
lowcore_ptr[cpu] = lc;
- pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
+ pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
return 0;
out:
@@ -326,10 +326,17 @@ static void pcpu_delegate(struct pcpu *pcpu,
/* Stop target cpu (if func returns this stops the current cpu). */
pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
/* Restart func on the target cpu and stop the current cpu. */
- mem_assign_absolute(lc->restart_stack, stack);
- mem_assign_absolute(lc->restart_fn, (unsigned long) func);
- mem_assign_absolute(lc->restart_data, (unsigned long) data);
- mem_assign_absolute(lc->restart_source, source_cpu);
+ if (lc) {
+ lc->restart_stack = stack;
+ lc->restart_fn = (unsigned long)func;
+ lc->restart_data = (unsigned long)data;
+ lc->restart_source = source_cpu;
+ } else {
+ put_abs_lowcore(restart_stack, stack);
+ put_abs_lowcore(restart_fn, (unsigned long)func);
+ put_abs_lowcore(restart_data, (unsigned long)data);
+ put_abs_lowcore(restart_source, source_cpu);
+ }
__bpon();
asm volatile(
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
@@ -570,39 +577,27 @@ static void smp_ctl_bit_callback(void *info)
}
static DEFINE_SPINLOCK(ctl_lock);
-static unsigned long ctlreg;
-/*
- * Set a bit in a control register of all cpus
- */
-void smp_ctl_set_bit(int cr, int bit)
+void smp_ctl_set_clear_bit(int cr, int bit, bool set)
{
- struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
-
- spin_lock(&ctl_lock);
- memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
- __set_bit(bit, &ctlreg);
- memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
- spin_unlock(&ctl_lock);
- on_each_cpu(smp_ctl_bit_callback, &parms, 1);
-}
-EXPORT_SYMBOL(smp_ctl_set_bit);
-
-/*
- * Clear a bit in a control register of all cpus
- */
-void smp_ctl_clear_bit(int cr, int bit)
-{
- struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
+ struct ec_creg_mask_parms parms = { .cr = cr, };
+ u64 ctlreg;
+ if (set) {
+ parms.orval = 1UL << bit;
+ parms.andval = -1UL;
+ } else {
+ parms.orval = 0;
+ parms.andval = ~(1UL << bit);
+ }
spin_lock(&ctl_lock);
- memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
- __clear_bit(bit, &ctlreg);
- memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
+ get_abs_lowcore(ctlreg, cregs_save_area[cr]);
+ ctlreg = (ctlreg & parms.andval) | parms.orval;
+ put_abs_lowcore(cregs_save_area[cr], ctlreg);
spin_unlock(&ctl_lock);
on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
-EXPORT_SYMBOL(smp_ctl_clear_bit);
+EXPORT_SYMBOL(smp_ctl_set_clear_bit);
#ifdef CONFIG_CRASH_DUMP
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 674c65019434..1d2aa448d103 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -141,10 +141,10 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
do_trap(regs, SIGFPE, si_code, "floating point exception");
}
-static void translation_exception(struct pt_regs *regs)
+static void translation_specification_exception(struct pt_regs *regs)
{
/* May never happen. */
- panic("Translation exception");
+ panic("Translation-Specification Exception");
}
static void illegal_op(struct pt_regs *regs)
@@ -368,7 +368,7 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = {
[0x0f] = hfp_divide_exception,
[0x10] = do_dat_exception,
[0x11] = do_dat_exception,
- [0x12] = translation_exception,
+ [0x12] = translation_specification_exception,
[0x13] = special_op_exception,
[0x14] = default_trap_handler,
[0x15] = operand_exception,
diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
index 707fd99f6734..0ece156fdd7c 100644
--- a/arch/s390/kernel/unwind_bc.c
+++ b/arch/s390/kernel/unwind_bc.c
@@ -64,8 +64,8 @@ bool unwind_next_frame(struct unwind_state *state)
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
reliable = false;
regs = NULL;
- if (!__kernel_text_address(ip)) {
- /* skip bogus %r14 */
+ /* skip bogus %r14 or if it is the same as regs->psw.addr */

+ if (!__kernel_text_address(ip) || state->ip == unwind_recover_ret_addr(state, ip)) {
state->regs = NULL;
return unwind_next_frame(state);
}
@@ -103,13 +103,11 @@ bool unwind_next_frame(struct unwind_state *state)
if (sp & 0x7)
goto out_err;
- ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp);
-
/* Update unwind state */
state->sp = sp;
- state->ip = ip;
state->regs = regs;
state->reliable = reliable;
+ state->ip = unwind_recover_ret_addr(state, ip);
return true;
out_err:
@@ -161,12 +159,10 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
}
- ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
-
/* Update unwind state */
state->sp = sp;
- state->ip = ip;
state->reliable = true;
+ state->ip = unwind_recover_ret_addr(state, ip);
if (!first_frame)
return;
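Both unwinder hunks funnel return-address fixup through unwind_recover_ret_addr(), which covers the kretprobe return trampoline that the removed bare ftrace_graph_ret_addr() calls did not. A sketch of what such a helper looks like, assuming the unwind state gained a kr_cur cursor for kretprobe bookkeeping (the exact s390 definition lives in asm/unwind.h and is not part of this diff):

/* Sketch only: recover the real return address when ip points at the
 * ftrace graph or kretprobe return trampoline. */
static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state,
						    unsigned long ip)
{
	ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
	if (is_kretprobe_trampoline(ip))
		ip = kretprobe_find_ret_addr(state->task, (void *)state->sp,
					     &state->kr_cur);
	return ip;
}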
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ab73d99483fe..156d1c25a3c1 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3462,7 +3462,7 @@ void exit_sie(struct kvm_vcpu *vcpu)
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
- kvm_make_request(req, vcpu);
+ __kvm_make_request(req, vcpu);
kvm_s390_vcpu_request(vcpu);
}
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 692dc84cd19c..5e7ea8b111e8 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -75,7 +75,7 @@ static inline int arch_load_niai4(int *lock)
int owner;
asm_inline volatile(
- ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */
+ ALTERNATIVE("", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */
" l %0,%1\n"
: "=d" (owner) : "Q" (*lock) : "memory");
return owner;
@@ -86,7 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
int expected = old;
asm_inline volatile(
- ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */
+ ALTERNATIVE("", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */
" cs %0,%3,%1\n"
: "=d" (old), "=Q" (*lock)
: "0" (old), "d" (new), "Q" (*lock)
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index c01f02887de4..9bb067321ab4 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -47,7 +47,7 @@ static void print_backtrace(char *bt)
static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
unsigned long sp)
{
- int frame_count, prev_is_func2, seen_func2_func1;
+ int frame_count, prev_is_func2, seen_func2_func1, seen_kretprobe_trampoline;
const int max_frames = 128;
struct unwind_state state;
size_t bt_pos = 0;
@@ -63,6 +63,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
frame_count = 0;
prev_is_func2 = 0;
seen_func2_func1 = 0;
+ seen_kretprobe_trampoline = 0;
unwind_for_each_frame(&state, task, regs, sp) {
unsigned long addr = unwind_get_return_address(&state);
char sym[KSYM_SYMBOL_LEN];
@@ -88,6 +89,8 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1"))
seen_func2_func1 = 1;
prev_is_func2 = str_has_prefix(sym, "unwindme_func2");
+ if (str_has_prefix(sym, "__kretprobe_trampoline+0x0/"))
+ seen_kretprobe_trampoline = 1;
}
/* Check the results. */
@@ -103,6 +106,10 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
kunit_err(current_test, "Maximum number of frames exceeded\n");
ret = -EINVAL;
}
+ if (seen_kretprobe_trampoline) {
+ kunit_err(current_test, "__kretprobe_trampoline+0x0 in unwinding results\n");
+ ret = -EINVAL;
+ }
if (ret || force_bt)
print_backtrace(bt);
kfree(bt);
@@ -132,36 +139,50 @@ static struct unwindme *unwindme;
#define UWM_PGM 0x40 /* Unwind from program check handler */
#define UWM_KPROBE_ON_FTRACE 0x80 /* Unwind from kprobe handler called via ftrace. */
#define UWM_FTRACE 0x100 /* Unwind from ftrace handler. */
-#define UWM_KRETPROBE 0x200 /* Unwind kretprobe handlers. */
+#define UWM_KRETPROBE 0x200 /* Unwind through a kretprobed function. */
+#define UWM_KRETPROBE_HANDLER 0x400 /* Unwind from kretprobe handler. */
-static __always_inline unsigned long get_psw_addr(void)
+static __always_inline struct pt_regs fake_pt_regs(void)
{
- unsigned long psw_addr;
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+ regs.gprs[15] = current_stack_pointer();
asm volatile(
"basr %[psw_addr],0\n"
- : [psw_addr] "=d" (psw_addr));
- return psw_addr;
+ : [psw_addr] "=d" (regs.psw.addr));
+ return regs;
}
static int kretprobe_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
struct unwindme *u = unwindme;
+ if (!(u->flags & UWM_KRETPROBE_HANDLER))
+ return 0;
+
u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
(u->flags & UWM_SP) ? u->sp : 0);
return 0;
}
-static noinline notrace void test_unwind_kretprobed_func(void)
+static noinline notrace int test_unwind_kretprobed_func(struct unwindme *u)
{
- asm volatile(" nop\n");
+ struct pt_regs regs;
+
+ if (!(u->flags & UWM_KRETPROBE))
+ return 0;
+
+ regs = fake_pt_regs();
+ return test_unwind(NULL, (u->flags & UWM_REGS) ? &regs : NULL,
+ (u->flags & UWM_SP) ? u->sp : 0);
}
-static noinline void test_unwind_kretprobed_func_caller(void)
+static noinline int test_unwind_kretprobed_func_caller(struct unwindme *u)
{
- test_unwind_kretprobed_func();
+ return test_unwind_kretprobed_func(u);
}
static int test_unwind_kretprobe(struct unwindme *u)
@@ -187,10 +208,12 @@ static int test_unwind_kretprobe(struct unwindme *u)
return -EINVAL;
}
- test_unwind_kretprobed_func_caller();
+ ret = test_unwind_kretprobed_func_caller(u);
unregister_kretprobe(&my_kretprobe);
unwindme = NULL;
- return u->ret;
+ if (u->flags & UWM_KRETPROBE_HANDLER)
+ ret = u->ret;
+ return ret;
}
static int kprobe_pre_handler(struct kprobe *p, struct pt_regs *regs)
@@ -304,16 +327,13 @@ static noinline int unwindme_func4(struct unwindme *u)
return 0;
} else if (u->flags & (UWM_PGM | UWM_KPROBE_ON_FTRACE)) {
return test_unwind_kprobe(u);
- } else if (u->flags & (UWM_KRETPROBE)) {
+ } else if (u->flags & (UWM_KRETPROBE | UWM_KRETPROBE_HANDLER)) {
return test_unwind_kretprobe(u);
} else if (u->flags & UWM_FTRACE) {
return test_unwind_ftrace(u);
} else {
- struct pt_regs regs;
+ struct pt_regs regs = fake_pt_regs();
- memset(&regs, 0, sizeof(regs));
- regs.psw.addr = get_psw_addr();
- regs.gprs[15] = current_stack_pointer();
return test_unwind(NULL,
(u->flags & UWM_REGS) ? &regs : NULL,
(u->flags & UWM_SP) ? u->sp : 0);
@@ -452,6 +472,10 @@ static const struct test_params param_list[] = {
TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP),
TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_REGS),
TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP | UWM_REGS),
+ TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER),
+ TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP),
+ TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_REGS),
+ TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP | UWM_REGS),
};
/*
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 792f8e0f2178..e563cb65c0c4 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
list_for_each_entry(tmp, &zpci_list, entry) {
if (tmp->fid == fid) {
zdev = tmp;
+ zpci_zdev_get(zdev);
break;
}
}
@@ -399,7 +400,7 @@ EXPORT_SYMBOL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
- struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
+ struct zpci_dev *zdev = zdev_from_bus(bus, devfn);
return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}
@@ -407,7 +408,7 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
- struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
+ struct zpci_dev *zdev = zdev_from_bus(bus, devfn);
return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}
diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
index e359d2686178..e96c9860e064 100644
--- a/arch/s390/pci/pci_bus.h
+++ b/arch/s390/pci/pci_bus.h
@@ -19,7 +19,8 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error);
void zpci_release_device(struct kref *kref);
static inline void zpci_zdev_put(struct zpci_dev *zdev)
{
- kref_put(&zdev->kref, zpci_release_device);
+ if (zdev)
+ kref_put(&zdev->kref, zpci_release_device);
}
static inline void zpci_zdev_get(struct zpci_dev *zdev)
@@ -32,8 +33,8 @@ void zpci_free_domain(int domain);
int zpci_setup_bus_resources(struct zpci_dev *zdev,
struct list_head *resources);
-static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
- unsigned int devfn)
+static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
+ unsigned int devfn)
{
struct zpci_bus *zbus = bus->sysdata;
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 63f3e057c168..1057d7af4a55 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -23,6 +23,8 @@
#include <asm/clp.h>
#include <uapi/asm/clp.h>
+#include "pci_bus.h"
+
bool zpci_unique_uid;
void update_uid_checking(bool new)
@@ -404,8 +406,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
return;
zdev = get_zdev_by_fid(entry->fid);
- if (!zdev)
- zpci_create_device(entry->fid, entry->fh, entry->config_state);
+ if (zdev) {
+ zpci_zdev_put(zdev);
+ return;
+ }
+ zpci_create_device(entry->fid, entry->fh, entry->config_state);
}
int clp_scan_pci_devices(void)
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 2e3e5b278925..ea9db5cea64e 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -269,7 +269,7 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
if (!pdev)
- return;
+ goto no_pdev;
switch (ccdf->pec) {
case 0x003a: /* Service Action or Error Recovery Successful */
@@ -286,6 +286,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
break;
}
pci_dev_put(pdev);
+no_pdev:
+ zpci_zdev_put(zdev);
}
void zpci_event_error(void *data)
@@ -314,6 +316,7 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+ bool existing_zdev = !!zdev;
enum zpci_state state;
zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n",
@@ -378,6 +381,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
default:
break;
}
+ if (existing_zdev)
+ zpci_zdev_put(zdev);
}
void zpci_event_availability(void *data)
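Taken together, the zPCI hunks turn get_zdev_by_fid() into a reference-taking lookup, so every caller now owns a reference and must drop it; making zpci_zdev_put() NULL-safe keeps the error paths simple. The expected caller pattern, sketched:

/* Sketch of the new caller contract for get_zdev_by_fid(). */
static void example_use_zdev(u32 fid)
{
	struct zpci_dev *zdev = get_zdev_by_fid(fid);	/* takes a reference */

	if (!zdev)
		return;
	/* ... use zdev; the reference pins it against concurrent release ... */
	zpci_zdev_put(zdev);	/* drop the lookup's reference; NULL-safe */
}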
diff --git a/arch/sh/include/asm/user.h b/arch/sh/include/asm/user.h
index 7dfd3f6461e6..12ea0f3f4419 100644
--- a/arch/sh/include/asm/user.h
+++ b/arch/sh/include/asm/user.h
@@ -52,10 +52,4 @@ struct user {
char u_comm[32]; /* user command name */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_DATA_START_ADDR (u.start_data)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-
#endif /* __ASM_SH_USER_H */
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index b08bd2966253..f1f3f52f1e9c 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -24,7 +24,6 @@ generic-y += softirq_stack.h
generic-y += switch_to.h
generic-y += topology.h
generic-y += trace_clock.h
-generic-y += word-at-a-time.h
generic-y += kprobes.h
generic-y += mm_hooks.h
generic-y += vga.h
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4138939532c6..d23e80a56eb8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -249,6 +249,7 @@ enum x86_intercept_stage;
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
+#define PFERR_IMPLICIT_ACCESS_BIT 48
#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
@@ -259,6 +260,7 @@ enum x86_intercept_stage;
#define PFERR_SGX_MASK (1U << PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
+#define PFERR_IMPLICIT_ACCESS (1ULL << PFERR_IMPLICIT_ACCESS_BIT)
#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
PFERR_WRITE_MASK | \
@@ -430,7 +432,7 @@ struct kvm_mmu {
void (*inject_page_fault)(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- gpa_t gva_or_gpa, u32 access,
+ gpa_t gva_or_gpa, u64 access,
struct x86_exception *exception);
int (*sync_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp);
@@ -512,6 +514,7 @@ struct kvm_pmu {
u64 global_ctrl_mask;
u64 global_ovf_ctrl_mask;
u64 reserved_bits;
+ u64 raw_event_mask;
u8 version;
struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
@@ -1040,14 +1043,16 @@ struct kvm_x86_msr_filter {
struct msr_bitmap_range ranges[16];
};
-#define APICV_INHIBIT_REASON_DISABLE 0
-#define APICV_INHIBIT_REASON_HYPERV 1
-#define APICV_INHIBIT_REASON_NESTED 2
-#define APICV_INHIBIT_REASON_IRQWIN 3
-#define APICV_INHIBIT_REASON_PIT_REINJ 4
-#define APICV_INHIBIT_REASON_X2APIC 5
-#define APICV_INHIBIT_REASON_BLOCKIRQ 6
-#define APICV_INHIBIT_REASON_ABSENT 7
+enum kvm_apicv_inhibit {
+ APICV_INHIBIT_REASON_DISABLE,
+ APICV_INHIBIT_REASON_HYPERV,
+ APICV_INHIBIT_REASON_NESTED,
+ APICV_INHIBIT_REASON_IRQWIN,
+ APICV_INHIBIT_REASON_PIT_REINJ,
+ APICV_INHIBIT_REASON_X2APIC,
+ APICV_INHIBIT_REASON_BLOCKIRQ,
+ APICV_INHIBIT_REASON_ABSENT,
+};
struct kvm_arch {
unsigned long n_used_mmu_pages;
@@ -1401,7 +1406,7 @@ struct kvm_x86_ops {
void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
void (*enable_irq_window)(struct kvm_vcpu *vcpu);
void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
- bool (*check_apicv_inhibit_reasons)(ulong bit);
+ bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit reason);
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
@@ -1585,7 +1590,7 @@ void kvm_mmu_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
-void kvm_mmu_init_vm(struct kvm *kvm);
+int kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
@@ -1795,11 +1800,22 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
bool kvm_apicv_activated(struct kvm *kvm);
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
-void kvm_request_apicv_update(struct kvm *kvm, bool activate,
- unsigned long bit);
+void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+ enum kvm_apicv_inhibit reason, bool set);
+void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+ enum kvm_apicv_inhibit reason, bool set);
+
+static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
+ enum kvm_apicv_inhibit reason)
+{
+ kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
+}
-void __kvm_request_apicv_update(struct kvm *kvm, bool activate,
- unsigned long bit);
+static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
+ enum kvm_apicv_inhibit reason)
+{
+ kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
+}
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 7eb2df5417fb..f70a5108d464 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -221,8 +221,14 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)
+#define SVM_TSC_RATIO_RSVD 0xffffff0000000000ULL
+#define SVM_TSC_RATIO_MIN 0x0000000000000001ULL
+#define SVM_TSC_RATIO_MAX 0x000000ffffffffffULL
+#define SVM_TSC_RATIO_DEFAULT 0x0100000000ULL
+
+
/* AVIC */
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFFULL)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
@@ -230,9 +236,11 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
-#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFF)
+#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFFULL)
+
+#define AVIC_DOORBELL_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
-#define AVIC_DOORBELL_PHYSICAL_ID_MASK (0xFF)
+#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
diff --git a/arch/x86/include/asm/user_32.h b/arch/x86/include/asm/user_32.h
index d72c3d66e94f..8963915e533f 100644
--- a/arch/x86/include/asm/user_32.h
+++ b/arch/x86/include/asm/user_32.h
@@ -124,9 +124,5 @@ struct user{
char u_comm[32]; /* User command that was responsible */
int u_debugreg[8];
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
#endif /* _ASM_X86_USER_32_H */
diff --git a/arch/x86/include/asm/user_64.h b/arch/x86/include/asm/user_64.h
index db909923611c..1dd10f07ccd6 100644
--- a/arch/x86/include/asm/user_64.h
+++ b/arch/x86/include/asm/user_64.h
@@ -130,9 +130,5 @@ struct user {
unsigned long error_code; /* CPU error code or 0 */
unsigned long fault_address; /* CR3 or 0 */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
#endif /* _ASM_X86_USER_64_H */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 79e0b8d63ffa..a22deb58f86d 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -517,7 +517,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
ipi_bitmap <<= min - apic_id;
min = apic_id;
- } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
+ } else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) {
max = apic_id < max ? max : apic_id;
} else {
ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
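The added apic_id > min guard closes a hole: with the old condition, an APIC ID below min that also failed the max - apic_id < KVM_IPI_CLUSTER_SIZE test still satisfied apic_id < min + KVM_IPI_CLUSTER_SIZE, and the later bitmap update (not shown in this hunk) shifts by apic_id - min, which then underflows. A worked check, using KVM_IPI_CLUSTER_SIZE = 2 * 64 = 128 on x86-64:

#include <assert.h>
#include <stdbool.h>

#define KVM_IPI_CLUSTER_SIZE	128	/* 2 * BITS_PER_LONG on x86-64 */

static bool old_in_range(int min, int apic_id)
{
	return apic_id < min + KVM_IPI_CLUSTER_SIZE;
}

static bool new_in_range(int min, int apic_id)
{
	return apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE;
}

int main(void)
{
	int min = 200, max = 200, apic_id = 8;

	/* The extend-downward branch is not taken: 200 - 8 = 192 >= 128. */
	assert(!(apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE));
	/* Old code then shifted by apic_id - min == -192: undefined. */
	assert(old_in_range(min, apic_id));
	/* Fixed code falls through to the plain hypercall instead. */
	assert(!new_in_range(min, apic_id));
	return 0;
}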
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index a00cd97b2623..b24ca7f4ed7c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -735,6 +735,7 @@ static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
if (function > READ_ONCE(max_cpuid_80000000))
return entry;
}
+ break;
default:
break;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f9c00c89829b..89b11e7dca8a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3540,8 +3540,10 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{
u64 tsc_aux = 0;
- if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
+ if (!ctxt->ops->guest_has_rdpid(ctxt))
return emulate_ud(ctxt);
+
+ ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
ctxt->dst.val = tsc_aux;
return X86EMUL_CONTINUE;
}
@@ -3642,7 +3644,7 @@ static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
- r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
+ r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
if (r == X86EMUL_IO_NEEDED)
return r;
@@ -3659,7 +3661,7 @@ static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
u64 msr_data;
int r;
- r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
+ r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
if (r == X86EMUL_IO_NEEDED)
return r;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index a32f54ab84a2..123b677111c5 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -122,9 +122,13 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
else
hv->synic_auto_eoi_used--;
- __kvm_request_apicv_update(vcpu->kvm,
- !hv->synic_auto_eoi_used,
- APICV_INHIBIT_REASON_HYPERV);
+ /*
+ * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
+ * the hypervisor to manually inject IRQs.
+ */
+ __kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
+ APICV_INHIBIT_REASON_HYPERV,
+ !!hv->synic_auto_eoi_used);
up_write(&vcpu->kvm->arch.apicv_update_lock);
}
@@ -239,7 +243,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
int ret;
- if (!synic->active && !host)
+ if (!synic->active && (!host || data))
return 1;
trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
@@ -285,6 +289,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
case HV_X64_MSR_EOM: {
int i;
+ if (!synic->active)
+ break;
+
for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
kvm_hv_notify_acked_sint(vcpu, i);
break;
@@ -449,6 +456,9 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
struct kvm_lapic_irq irq;
int ret, vector;
+ if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
+ return -EINVAL;
+
if (sint >= ARRAY_SIZE(synic->sint))
return -EINVAL;
@@ -661,7 +671,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
- if (!synic->active && !host)
+ if (!synic->active && (!host || config))
return 1;
if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
@@ -690,7 +700,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
- if (!synic->active && !host)
+ if (!synic->active && (!host || count))
return 1;
trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
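The three (!host || data), (!host || config) and (!host || count) checks share one rule: while SynIC is inactive, only a host-initiated write of the default value 0 is tolerated (for example during state restore), and everything else fails. As a truth table:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirrors "if (!synic->active && (!host || data)) return 1;". */
static bool synic_write_allowed(bool active, bool host, uint64_t data)
{
	return !(!active && (!host || data));
}

int main(void)
{
	assert(synic_write_allowed(true, false, 123));  /* active: guest OK */
	assert(synic_write_allowed(false, true, 0));    /* inactive: host may
							   restore default 0 */
	assert(!synic_write_allowed(false, true, 123)); /* inactive, nonzero */
	assert(!synic_write_allowed(false, false, 0));  /* inactive, guest */
	return 0;
}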
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 0b65a764ed3a..1c83076091af 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -305,15 +305,13 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
* So, deactivate APICv when PIT is in reinject mode.
*/
if (reinject) {
- kvm_request_apicv_update(kvm, false,
- APICV_INHIBIT_REASON_PIT_REINJ);
+ kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
/* The initial state is preserved while ps->reinject == 0. */
kvm_pit_reset_reinject(pit);
kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
} else {
- kvm_request_apicv_update(kvm, true,
- APICV_INHIBIT_REASON_PIT_REINJ);
+ kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
}
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 840ddb4a9302..8dff25d267b7 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -210,6 +210,8 @@ struct x86_emulate_ops {
int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+ int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
+ int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
@@ -226,6 +228,7 @@ struct x86_emulate_ops {
bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt);
bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt);
bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt);
+ bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 80a2020c4db4..66b0eb0bda94 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1024,6 +1024,10 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
*r = -1;
if (irq->shorthand == APIC_DEST_SELF) {
+ if (KVM_BUG_ON(!src, kvm)) {
+ *r = 0;
+ return true;
+ }
*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
return true;
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index bf8dbc4bb12a..e6cae6f22683 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -214,27 +214,27 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
*/
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
unsigned pte_access, unsigned pte_pkey,
- unsigned pfec)
+ u64 access)
{
- int cpl = static_call(kvm_x86_get_cpl)(vcpu);
+ /* strip nested paging fault error codes */
+ unsigned int pfec = access;
unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
/*
- * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
+ * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
+ * For implicit supervisor accesses, SMAP cannot be overridden.
*
- * If CPL = 3, SMAP applies to all supervisor-mode data accesses
- * (these are implicit supervisor accesses) regardless of the value
- * of EFLAGS.AC.
+ * SMAP applies only to supervisor accesses; for user accesses, not_smap
+ * may be either set or clear and has no bearing on the result.
*
- * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
- * the result in X86_EFLAGS_AC. We then insert it in place of
- * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
- * but it will be one in index if SMAP checks are being overridden.
- * It is important to keep this branchless.
+ * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
+ * this bit will always be zero in pfec, but it will be one in index
+ * if SMAP checks are being disabled.
*/
- unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
- int index = (pfec >> 1) +
- (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
+ u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
+ bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
+ int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
bool fault = (mmu->permissions[index] >> pte_access) & 1;
u32 errcode = PFERR_PRESENT_MASK;
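The trick stays branchless: PFERR_IMPLICIT_ACCESS (bit 48) is ORed into the access word for implicit supervisor accesses, so the equality test can only hold for an explicit access with EFLAGS.AC set; not_smap is then folded into the table index through the PFERR_RSVD bit position (bit 3), which is guaranteed zero in real fault codes. A small check of the predicate:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define X86_EFLAGS_AC		(1ULL << 18)
#define PFERR_IMPLICIT_ACCESS	(1ULL << 48)

static bool not_smap(uint64_t rflags, uint64_t access)
{
	uint64_t implicit = access & PFERR_IMPLICIT_ACCESS;

	return ((rflags & X86_EFLAGS_AC) | implicit) == X86_EFLAGS_AC;
}

int main(void)
{
	/* Explicit supervisor access with EFLAGS.AC set: SMAP overridden. */
	assert(not_smap(X86_EFLAGS_AC, 0));
	/* Explicit access with AC clear: SMAP applies. */
	assert(!not_smap(0, 0));
	/* Implicit access: SMAP applies even when AC is set. */
	assert(!not_smap(X86_EFLAGS_AC, PFERR_IMPLICIT_ACCESS));
	return 0;
}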
@@ -317,12 +317,12 @@ static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
atomic64_add(count, &kvm->stat.pages[level - 1]);
}
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
struct x86_exception *exception);
static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu,
- gpa_t gpa, u32 access,
+ gpa_t gpa, u64 access,
struct x86_exception *exception)
{
if (mmu != &vcpu->arch.nested_mmu)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 51671cb34fb6..8f19ea752704 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2696,8 +2696,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
if (*sptep == spte) {
ret = RET_PF_SPURIOUS;
} else {
- trace_kvm_mmu_set_spte(level, gfn, sptep);
flush |= mmu_spte_update(sptep, spte);
+ trace_kvm_mmu_set_spte(level, gfn, sptep);
}
if (wrprot) {
@@ -3703,7 +3703,7 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
}
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- gpa_t vaddr, u32 access,
+ gpa_t vaddr, u64 access,
struct x86_exception *exception)
{
if (exception)
@@ -4591,11 +4591,11 @@ static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
* - X86_CR4_SMAP is set in CR4
* - A user page is accessed
* - The access is not a fetch
- * - Page fault in kernel mode
- * - if CPL = 3 or X86_EFLAGS_AC is clear
+ * - The access is supervisor mode
+ * - If implicit supervisor access or X86_EFLAGS_AC is clear
*
- * Here, we cover the first three conditions.
- * The fourth is computed dynamically in permission_fault();
+ * Here, we cover the first four conditions.
+ * The fifth is computed dynamically in permission_fault();
* PFERR_RSVD_MASK bit will be set in PFEC if the access is
* *not* subject to SMAP restrictions.
*/
@@ -5768,17 +5768,24 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
kvm_mmu_zap_all_fast(kvm);
}
-void kvm_mmu_init_vm(struct kvm *kvm)
+int kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+ int r;
+ INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+ INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+ INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
- kvm_mmu_init_tdp_mmu(kvm);
+ r = kvm_mmu_init_tdp_mmu(kvm);
+ if (r < 0)
+ return r;
node->track_write = kvm_mmu_pte_write;
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
kvm_page_track_register_notifier(kvm, node);
+ return 0;
}
void kvm_mmu_uninit_vm(struct kvm *kvm)
@@ -5842,8 +5849,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
if (is_tdp_mmu_enabled(kvm)) {
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
- flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
- gfn_end, flush);
+ flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
+ gfn_end, true, flush);
}
if (flush)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 252c77805eb9..01fee5f67ac3 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -34,9 +34,8 @@
#define PT_HAVE_ACCESSED_DIRTY(mmu) true
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
- #define CMPXCHG cmpxchg
+ #define CMPXCHG "cmpxchgq"
#else
- #define CMPXCHG cmpxchg64
#define PT_MAX_FULL_LEVELS 2
#endif
#elif PTTYPE == 32
@@ -52,7 +51,7 @@
#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
#define PT_HAVE_ACCESSED_DIRTY(mmu) true
- #define CMPXCHG cmpxchg
+ #define CMPXCHG "cmpxchgl"
#elif PTTYPE == PTTYPE_EPT
#define pt_element_t u64
#define guest_walker guest_walkerEPT
@@ -65,7 +64,9 @@
#define PT_GUEST_DIRTY_SHIFT 9
#define PT_GUEST_ACCESSED_SHIFT 8
#define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
- #define CMPXCHG cmpxchg64
+ #ifdef CONFIG_X86_64
+ #define CMPXCHG "cmpxchgq"
+ #endif
#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
#error Invalid PTTYPE value
@@ -147,43 +148,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
pt_element_t __user *ptep_user, unsigned index,
pt_element_t orig_pte, pt_element_t new_pte)
{
- int npages;
- pt_element_t ret;
- pt_element_t *table;
- struct page *page;
-
- npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
- if (likely(npages == 1)) {
- table = kmap_atomic(page);
- ret = CMPXCHG(&table[index], orig_pte, new_pte);
- kunmap_atomic(table);
-
- kvm_release_page_dirty(page);
- } else {
- struct vm_area_struct *vma;
- unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
- unsigned long pfn;
- unsigned long paddr;
-
- mmap_read_lock(current->mm);
- vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
- if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
- mmap_read_unlock(current->mm);
- return -EFAULT;
- }
- pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- paddr = pfn << PAGE_SHIFT;
- table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
- if (!table) {
- mmap_read_unlock(current->mm);
- return -EFAULT;
- }
- ret = CMPXCHG(&table[index], orig_pte, new_pte);
- memunmap(table);
- mmap_read_unlock(current->mm);
- }
+ signed char r;
- return (ret != orig_pte);
+ if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
+ return -EFAULT;
+
+#ifdef CMPXCHG
+ asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
+ "setnz %b[r]\n"
+ "2:"
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
+ : [ptr] "+m" (*ptep_user),
+ [old] "+a" (orig_pte),
+ [r] "=q" (r)
+ : [new] "r" (new_pte)
+ : "memory");
+#else
+ asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
+ "setnz %b[r]\n"
+ "2:"
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
+ : [ptr] "+m" (*ptep_user),
+ [old] "+A" (orig_pte),
+ [r] "=q" (r)
+ : [new_lo] "b" ((u32)new_pte),
+ [new_hi] "c" ((u32)(new_pte >> 32))
+ : "memory");
+#endif
+
+ user_access_end();
+ return r;
}
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
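The rewritten cmpxchg_gpte() now returns a tri-state value: negative when the user access faults (the extable fixup writes the error into %k[r]), 0 when the cmpxchg won (ZF set, so setnz writes 0), and 1 when another writer changed the PTE first. A hypothetical caller under that reading:

/* Hypothetical caller sketch for the new return contract. */
int ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);

if (ret < 0)
	return ret;		/* fault on the user access */
if (ret)
	goto retry_walk;	/* lost the race: re-read the guest PTE */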
@@ -339,7 +333,7 @@ static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
*/
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- gpa_t addr, u32 access)
+ gpa_t addr, u64 access)
{
int ret;
pt_element_t pte;
@@ -347,7 +341,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
gfn_t table_gfn;
u64 pt_access, pte_access;
unsigned index, accessed_dirty, pte_pkey;
- unsigned nested_access;
+ u64 nested_access;
gpa_t pte_gpa;
bool have_ad;
int offset;
@@ -540,7 +534,7 @@ error:
}
static int FNAME(walk_addr)(struct guest_walker *walker,
- struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
+ struct kvm_vcpu *vcpu, gpa_t addr, u64 access)
{
return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
access);
@@ -988,7 +982,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- gpa_t addr, u32 access,
+ gpa_t addr, u64 access,
struct x86_exception *exception)
{
struct guest_walker walker;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index e7e7876251b3..d71d177ae6b8 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -14,21 +14,24 @@ static bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
/* Initializes the TDP MMU for the VM, if enabled. */
-bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
+ struct workqueue_struct *wq;
+
if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
- return false;
+ return 0;
+
+ wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
+ if (!wq)
+ return -ENOMEM;
/* This should not be changed for the lifetime of the VM. */
kvm->arch.tdp_mmu_enabled = true;
-
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
- kvm->arch.tdp_mmu_zap_wq =
- alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
-
- return true;
+ kvm->arch.tdp_mmu_zap_wq = wq;
+ return 1;
}
/* Arbitrarily returns true so that this may be used in if statements. */
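Note the asymmetry in kvm_mmu_init_tdp_mmu()'s new contract: a negative errno only when the workqueue allocation fails, 0 when the TDP MMU is simply disabled, and 1 when it is enabled. That is why the kvm_mmu_init_vm() hunk earlier treats only r < 0 as fatal:

/* Caller-side sketch of the tri-state result (cf. kvm_mmu_init_vm() above). */
int r = kvm_mmu_init_tdp_mmu(kvm);

if (r < 0)
	return r;		/* -ENOMEM from alloc_workqueue() */
/* r == 0 (TDP MMU disabled) and r == 1 (enabled) are both success. */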
@@ -906,10 +909,8 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
}
/*
- * Tears down the mappings for the range of gfns, [start, end), and frees the
- * non-root pages mapping GFNs strictly within that range. Returns true if
- * SPTEs have been cleared and a TLB flush is needed before releasing the
- * MMU lock.
+ * Zap leaf SPTEs for the range of gfns, [start, end). Returns true if SPTEs
+ * have been cleared and a TLB flush is needed before releasing the MMU lock.
*
* If can_yield is true, will release the MMU lock and reschedule if the
* scheduler needs the CPU or there is contention on the MMU lock. If this
@@ -917,42 +918,25 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
* the caller must ensure it does not supply too large a GFN range, or the
* operation can cause a soft lockup.
*/
-static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end, bool can_yield, bool flush)
+static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
+ gfn_t start, gfn_t end, bool can_yield, bool flush)
{
- bool zap_all = (start == 0 && end >= tdp_mmu_max_gfn_host());
struct tdp_iter iter;
- /*
- * No need to try to step down in the iterator when zapping all SPTEs,
- * zapping the top-level non-leaf SPTEs will recurse on their children.
- */
- int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
-
end = min(end, tdp_mmu_max_gfn_host());
lockdep_assert_held_write(&kvm->mmu_lock);
rcu_read_lock();
- for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
+ for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
if (can_yield &&
tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
flush = false;
continue;
}
- if (!is_shadow_present_pte(iter.old_spte))
- continue;
-
- /*
- * If this is a non-last-level SPTE that covers a larger range
- * than should be zapped, continue, and zap the mappings at a
- * lower level, except when zapping all SPTEs.
- */
- if (!zap_all &&
- (iter.gfn < start ||
- iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
+ if (!is_shadow_present_pte(iter.old_spte) ||
!is_last_spte(iter.old_spte, iter.level))
continue;
@@ -960,17 +944,13 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
flush = true;
}
- /*
- * Need to flush before releasing RCU. TODO: do it only if intermediate
- * page tables were zapped; there is no need to flush under RCU protection
- * if no 'struct kvm_mmu_page' is freed.
- */
- if (flush)
- kvm_flush_remote_tlbs_with_address(kvm, start, end - start);
-
rcu_read_unlock();
- return false;
+ /*
+ * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
+ * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
+ */
+ return flush;
}
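Because tdp_mmu_zap_leafs() no longer flushes under RCU itself, the flush obligation travels back through the return value: callers accumulate it across roots and flush once, outside the RCU read section, as the kvm_zap_gfn_range() hunk above already does. The resulting pattern:

/* Caller-side pattern: accumulate, then flush once after the loop. */
for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
	flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);

if (flush)
	kvm_flush_remote_tlbs_with_address(kvm, start, end - start);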
/*
@@ -979,13 +959,13 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
* SPTEs have been cleared and a TLB flush is needed before releasing the
* MMU lock.
*/
-bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
- gfn_t end, bool can_yield, bool flush)
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
+ bool can_yield, bool flush)
{
struct kvm_mmu_page *root;
for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
- flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
+ flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
return flush;
}
@@ -1233,8 +1213,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
bool flush)
{
- return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
- range->end, range->may_block, flush);
+ return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
+ range->end, range->may_block, flush);
}
typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 5e5ef2576c81..c163f7cc23ca 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -15,14 +15,8 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared);
-bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
gfn_t end, bool can_yield, bool flush);
-static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
- gfn_t start, gfn_t end, bool flush)
-{
- return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
-}
-
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
@@ -72,7 +66,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
u64 *spte);
#ifdef CONFIG_X86_64
-bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
@@ -93,7 +87,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
-static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
+static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index b1a02993782b..eca39f56c231 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -96,8 +96,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
u64 config, bool exclude_user,
- bool exclude_kernel, bool intr,
- bool in_tx, bool in_tx_cp)
+ bool exclude_kernel, bool intr)
{
struct perf_event *event;
struct perf_event_attr attr = {
@@ -116,16 +115,14 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
attr.sample_period = get_sample_period(pmc, pmc->counter);
- if (in_tx)
- attr.config |= HSW_IN_TX;
- if (in_tx_cp) {
+ if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
+ guest_cpuid_is_intel(pmc->vcpu)) {
/*
* HSW_IN_TX_CHECKPOINTED is not supported with nonzero
* period. Just clear the sample period so at least
* allocating the counter doesn't fail.
*/
attr.sample_period = 0;
- attr.config |= HSW_IN_TX_CHECKPOINTED;
}
event = perf_event_create_kernel_counter(&attr, -1, current,
@@ -185,6 +182,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
u32 type = PERF_TYPE_RAW;
struct kvm *kvm = pmc->vcpu->kvm;
struct kvm_pmu_event_filter *filter;
+ struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu);
bool allow_event = true;
if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
@@ -221,7 +219,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
}
if (type == PERF_TYPE_RAW)
- config = eventsel & AMD64_RAW_EVENT_MASK;
+ config = eventsel & pmu->raw_event_mask;
if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
return;
@@ -232,9 +230,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
pmc_reprogram_counter(pmc, type, config,
!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
- eventsel & ARCH_PERFMON_EVENTSEL_INT,
- (eventsel & HSW_IN_TX),
- (eventsel & HSW_IN_TX_CHECKPOINTED));
+ eventsel & ARCH_PERFMON_EVENTSEL_INT);
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
@@ -270,7 +266,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
!(en_field & 0x2), /* exclude user */
!(en_field & 0x1), /* exclude kernel */
- pmi, false, false);
+ pmi);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index b37b353ec086..a1cf9c31273b 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -726,7 +726,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_irq_routing_table *irq_rt;
- int idx, ret = -EINVAL;
+ int idx, ret = 0;
if (!kvm_arch_has_assigned_device(kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP))
@@ -737,7 +737,13 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
idx = srcu_read_lock(&kvm->irq_srcu);
irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
- WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
+
+ if (guest_irq >= irq_rt->nr_rt_entries ||
+ hlist_empty(&irq_rt->map[guest_irq])) {
+ pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
+ guest_irq, irq_rt->nr_rt_entries);
+ goto out;
+ }
hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
struct vcpu_data vcpu_info;
@@ -822,7 +828,7 @@ out:
return ret;
}
-bool avic_check_apicv_inhibit_reasons(ulong bit)
+bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
{
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
BIT(APICV_INHIBIT_REASON_ABSENT) |
@@ -833,7 +839,7 @@ bool avic_check_apicv_inhibit_reasons(ulong bit)
BIT(APICV_INHIBIT_REASON_X2APIC) |
BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
- return supported & BIT(bit);
+ return supported & BIT(reason);
}
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index d4de52409335..24eb935b6f85 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -262,12 +262,10 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* MSR_EVNTSELn */
pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
if (pmc) {
- if (data == pmc->eventsel)
- return 0;
- if (!(data & pmu->reserved_bits)) {
+ data &= ~pmu->reserved_bits;
+ if (data != pmc->eventsel)
reprogram_gp_counter(pmc, data);
- return 0;
- }
+ return 0;
}
return 1;
@@ -284,6 +282,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
pmu->reserved_bits = 0xfffffff000280000ull;
+ pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
pmu->version = 1;
/* not applicable to AMD; but clean them to prevent any fall out */
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0884c3414a1b..bd4c64b362d2 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -62,20 +62,8 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3
-#define SVM_FEATURE_LBRV (1 << 1)
-#define SVM_FEATURE_SVML (1 << 2)
-#define SVM_FEATURE_TSC_RATE (1 << 4)
-#define SVM_FEATURE_VMCB_CLEAN (1 << 5)
-#define SVM_FEATURE_FLUSH_ASID (1 << 6)
-#define SVM_FEATURE_DECODE_ASSIST (1 << 7)
-#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
-
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
-#define TSC_RATIO_RSVD 0xffffff0000000000ULL
-#define TSC_RATIO_MIN 0x0000000000000001ULL
-#define TSC_RATIO_MAX 0x000000ffffffffffULL
-
static bool erratum_383_found __read_mostly;
u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
@@ -87,7 +75,6 @@ u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
static uint64_t osvw_len = 4, osvw_status;
static DEFINE_PER_CPU(u64, current_tsc_ratio);
-#define TSC_RATIO_DEFAULT 0x0100000000ULL
static const struct svm_direct_access_msrs {
u32 index; /* Index of the MSR */
@@ -480,7 +467,7 @@ static void svm_hardware_disable(void)
{
/* Make sure we clean up behind us */
if (tsc_scaling)
- wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
+ wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
cpu_svm_disable();
@@ -526,8 +513,8 @@ static int svm_hardware_enable(void)
* Set the default value, even if we don't use TSC scaling
* to avoid having stale value in the msr
*/
- wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
- __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
+ wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
+ __this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT);
}
@@ -2723,7 +2710,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
}
- if (data & TSC_RATIO_RSVD)
+ if (data & SVM_TSC_RATIO_RSVD)
return 1;
svm->tsc_ratio_msr = data;
@@ -2918,7 +2905,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu)
* In this case AVIC was temporarily disabled for
* requesting the IRQ window and we have to re-enable it.
*/
- kvm_request_apicv_update(vcpu->kvm, true, APICV_INHIBIT_REASON_IRQWIN);
+ kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
++vcpu->stat.irq_window_exits;
return 1;
@@ -3516,7 +3503,7 @@ static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
* via AVIC. In that case, we need to temporarily disable AVIC,
* and fall back to injecting IRQ via V_IRQ.
*/
- kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_IRQWIN);
+ kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
svm_set_vintr(svm);
}
}
@@ -3948,6 +3935,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_cpuid_entry2 *best;
+ struct kvm *kvm = vcpu->kvm;
vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
boot_cpu_has(X86_FEATURE_XSAVE) &&
@@ -3974,16 +3962,14 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
* is exposed to the guest, disable AVIC.
*/
if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
- kvm_request_apicv_update(vcpu->kvm, false,
- APICV_INHIBIT_REASON_X2APIC);
+ kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_X2APIC);
/*
* Currently, AVIC does not work with nested virtualization.
* So, we disable AVIC when cpuid for SVM is set in the L1 guest.
*/
if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
- kvm_request_apicv_update(vcpu->kvm, false,
- APICV_INHIBIT_REASON_NESTED);
+ kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_NESTED);
}
init_vmcb_after_set_cpuid(vcpu);
}
@@ -4766,10 +4752,10 @@ static __init int svm_hardware_setup(void)
} else {
pr_info("TSC scaling supported\n");
kvm_has_tsc_control = true;
- kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
- kvm_tsc_scaling_ratio_frac_bits = 32;
}
}
+ kvm_max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
+ kvm_tsc_scaling_ratio_frac_bits = 32;
tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index e37bb3508cfa..f77a7d2d39dd 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -22,6 +22,8 @@
#include <asm/svm.h>
#include <asm/sev-common.h>
+#include "kvm_cache_regs.h"
+
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
#define IOPM_SIZE PAGE_SIZE * 3
@@ -569,17 +571,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
/* avic.c */
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
-#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
-#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
-
-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
-#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
-#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
-#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
-
-#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
-
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
@@ -592,7 +583,7 @@ void __avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
-bool avic_check_apicv_inhibit_reasons(ulong bit);
+bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
void avic_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void avic_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
bool avic_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c
index 98aa981c04ec..8cdc62c74a96 100644
--- a/arch/x86/kvm/svm/svm_onhyperv.c
+++ b/arch/x86/kvm/svm/svm_onhyperv.c
@@ -4,7 +4,6 @@
*/
#include <linux/kvm_host.h>
-#include "kvm_cache_regs.h"
#include <asm/mshyperv.h>
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 193f5ba930d1..e3a24b8f04be 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1339,23 +1339,25 @@ TRACE_EVENT(kvm_hv_stimer_cleanup,
__entry->vcpu_id, __entry->timer_index)
);
-TRACE_EVENT(kvm_apicv_update_request,
- TP_PROTO(bool activate, unsigned long bit),
- TP_ARGS(activate, bit),
+TRACE_EVENT(kvm_apicv_inhibit_changed,
+ TP_PROTO(int reason, bool set, unsigned long inhibits),
+ TP_ARGS(reason, set, inhibits),
TP_STRUCT__entry(
- __field(bool, activate)
- __field(unsigned long, bit)
+ __field(int, reason)
+ __field(bool, set)
+ __field(unsigned long, inhibits)
),
TP_fast_assign(
- __entry->activate = activate;
- __entry->bit = bit;
+ __entry->reason = reason;
+ __entry->set = set;
+ __entry->inhibits = inhibits;
),
- TP_printk("%s bit=%lu",
- __entry->activate ? "activate" : "deactivate",
- __entry->bit)
+ TP_printk("%s reason=%u, inhibits=0x%lx",
+ __entry->set ? "set" : "cleared",
+ __entry->reason, __entry->inhibits)
);
TRACE_EVENT(kvm_apicv_accept_irq,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 0684e519181b..bc3f8512bb64 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -389,6 +389,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
struct kvm_pmc *pmc;
u32 msr = msr_info->index;
u64 data = msr_info->data;
+ u64 reserved_bits;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -443,7 +444,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
return 0;
- if (!(data & pmu->reserved_bits)) {
+ reserved_bits = pmu->reserved_bits;
+ if ((pmc->idx == 2) &&
+ (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
+ reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
+ if (!(data & reserved_bits)) {
reprogram_gp_counter(pmc, data);
return 0;
}
@@ -485,6 +490,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->version = 0;
pmu->reserved_bits = 0xffffffff00200000ull;
+ pmu->raw_event_mask = X86_RAW_EVENT_MASK;
entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
if (!entry || !vcpu->kvm->arch.enable_pmu)
@@ -533,8 +539,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
entry = kvm_find_cpuid_entry(vcpu, 7, 0);
if (entry &&
(boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
- (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
- pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+ (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
+ pmu->reserved_bits ^= HSW_IN_TX;
+ pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
+ }
bitmap_set(pmu->all_valid_pmc_idx,
0, pmu->nr_arch_gp_counters);
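The net effect of the reserved_bits/raw_event_mask split: with HLE/RTM exposed, HSW_IN_TX (bit 32) becomes writable while HSW_IN_TX_CHECKPOINTED (bit 33) stays reserved except for the PMC2 special case in intel_pmu_set_msr() above, and both bits now survive into the raw config via raw_event_mask. In numbers:

#include <assert.h>
#include <stdint.h>

#define HSW_IN_TX		(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED	(1ULL << 33)

int main(void)
{
	uint64_t reserved_bits = 0xffffffff00200000ull;
	uint64_t raw_event_mask = 0;	/* stand-in for X86_RAW_EVENT_MASK */

	/* The intel_pmu_refresh() hunk, with HLE/RTM exposed: */
	reserved_bits ^= HSW_IN_TX;
	raw_event_mask |= HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;

	assert(!(reserved_bits & HSW_IN_TX));		/* now writable */
	assert(reserved_bits & HSW_IN_TX_CHECKPOINTED);	/* still reserved */
	assert(raw_event_mask & HSW_IN_TX);		/* kept in config */
	return 0;
}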
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e8963f5af618..04d170c4b61e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2866,21 +2866,17 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);
/* Nothing to do if hardware doesn't support EFER. */
- if (!msr)
+ if (!vmx_find_uret_msr(vmx, MSR_EFER))
return 0;
vcpu->arch.efer = efer;
- if (efer & EFER_LMA) {
- vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
- msr->data = efer;
- } else {
- vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
+ if (efer & EFER_LMA)
+ vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
+ else
+ vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
- msr->data = efer & ~EFER_LME;
- }
vmx_setup_uret_msrs(vmx);
return 0;
}
@@ -2906,7 +2902,6 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
static void exit_lmode(struct kvm_vcpu *vcpu)
{
- vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
}
@@ -7705,14 +7700,14 @@ static void vmx_hardware_unsetup(void)
free_kvm_area();
}
-static bool vmx_check_apicv_inhibit_reasons(ulong bit)
+static bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
{
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
BIT(APICV_INHIBIT_REASON_ABSENT) |
BIT(APICV_INHIBIT_REASON_HYPERV) |
BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
- return supported & BIT(bit);
+ return supported & BIT(reason);
}
static struct kvm_x86_ops vmx_x86_ops __initdata = {
@@ -7980,12 +7975,11 @@ static __init int hardware_setup(void)
if (!enable_apicv)
vmx_x86_ops.sync_pir_to_irr = NULL;
- if (cpu_has_vmx_tsc_scaling()) {
+ if (cpu_has_vmx_tsc_scaling())
kvm_has_tsc_control = true;
- kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
- kvm_tsc_scaling_ratio_frac_bits = 48;
- }
+ kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
+ kvm_tsc_scaling_ratio_frac_bits = 48;
kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
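
For orientation, the two fields now set unconditionally define a 16.48 fixed-point multiplier: with kvm_tsc_scaling_ratio_frac_bits = 48, a ratio of 1.0 is 1ULL << 48. A minimal sketch of how such a ratio is applied, mirroring the mul_u64_u64_shr() pattern KVM's __scale_tsc() uses:

	#include <linux/math64.h>

	/* E.g. a guest clocked at half the host TSC rate would pass
	 * ratio = 1ULL << 47. */
	static u64 scale_guest_tsc(u64 host_tsc, u64 ratio)
	{
		return mul_u64_u64_shr(host_tsc, ratio, 48);
	}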
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 02cf0a7e1d14..0c0ca599a353 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1748,9 +1748,6 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
{
struct msr_data msr;
- if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
- return KVM_MSR_RET_FILTERED;
-
switch (index) {
case MSR_FS_BASE:
case MSR_GS_BASE:
@@ -1832,9 +1829,6 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
struct msr_data msr;
int ret;
- if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
- return KVM_MSR_RET_FILTERED;
-
switch (index) {
case MSR_TSC_AUX:
if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
@@ -1871,6 +1865,20 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
return ret;
}
+static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+ if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
+ return KVM_MSR_RET_FILTERED;
+ return kvm_get_msr_ignored_check(vcpu, index, data, false);
+}
+
+static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+ if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
+ return KVM_MSR_RET_FILTERED;
+ return kvm_set_msr_ignored_check(vcpu, index, data, false);
+}
+
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
return kvm_get_msr_ignored_check(vcpu, index, data, false);
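
This split is the point of the refactor: userspace MSR filters constrain what the guest may touch, so only guest-initiated paths consult kvm_msr_allowed(), while kvm_get_msr()/kvm_set_msr() stay unfiltered for KVM's own internal accesses. A sketch of the resulting call pattern, assuming the helpers added in this hunk:

	u64 data;
	int r;

	/* Guest-initiated (RDMSR emulation): subject to the MSR filter,
	 * may return KVM_MSR_RET_FILTERED. */
	r = kvm_get_msr_with_filter(vcpu, ecx, &data);

	/* KVM-internal read: deliberately bypasses the filter. */
	r = kvm_get_msr(vcpu, MSR_IA32_TSC_DEADLINE, &data);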
@@ -1953,7 +1961,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
u64 data;
int r;
- r = kvm_get_msr(vcpu, ecx, &data);
+ r = kvm_get_msr_with_filter(vcpu, ecx, &data);
if (!r) {
trace_kvm_msr_read(ecx, data);
@@ -1978,7 +1986,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
u64 data = kvm_read_edx_eax(vcpu);
int r;
- r = kvm_set_msr(vcpu, ecx, data);
+ r = kvm_set_msr_with_filter(vcpu, ecx, data);
if (!r) {
trace_kvm_msr_write(ecx, data);
@@ -5938,7 +5946,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
smp_wmb();
kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
- kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
+ kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
r = 0;
split_irqchip_unlock:
mutex_unlock(&kvm->lock);
@@ -6335,7 +6343,7 @@ set_identity_unlock:
/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
smp_wmb();
kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
- kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
+ kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
create_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
@@ -6726,7 +6734,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
static_call(kvm_x86_get_segment)(vcpu, var, seg);
}
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
struct x86_exception *exception)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -6746,7 +6754,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
}
EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
@@ -6756,7 +6764,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_FETCH_MASK;
return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
}
@@ -6766,7 +6774,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_WRITE_MASK;
return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
}
@@ -6782,7 +6790,7 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
}
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
- struct kvm_vcpu *vcpu, u32 access,
+ struct kvm_vcpu *vcpu, u64 access,
struct x86_exception *exception)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -6819,7 +6827,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
unsigned offset;
int ret;
@@ -6844,7 +6852,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
- u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
/*
* FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
@@ -6863,9 +6871,11 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
struct x86_exception *exception, bool system)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- u32 access = 0;
+ u64 access = 0;
- if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
+ if (system)
+ access |= PFERR_IMPLICIT_ACCESS;
+ else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
access |= PFERR_USER_MASK;
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
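
The u32-to-u64 widening threaded through this file makes room for the new flag: the architectural page-fault error-code bits fit in the low 32 bits, while PFERR_IMPLICIT_ACCESS is a KVM-internal synthetic bit placed above them to tag implicit supervisor accesses (descriptor-table reads and the like) so permission checks such as SMAP can treat them correctly. Assumed definition, for orientation only:

	/* Assumption: mirrors the upstream arch/x86/kvm/mmu.h definition,
	 * deliberately above the hardware-defined error-code bits. */
	#define PFERR_IMPLICIT_ACCESS	BIT_ULL(48)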
@@ -6881,7 +6891,7 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
}
static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
- struct kvm_vcpu *vcpu, u32 access,
+ struct kvm_vcpu *vcpu, u64 access,
struct x86_exception *exception)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -6915,9 +6925,11 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
bool system)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- u32 access = PFERR_WRITE_MASK;
+ u64 access = PFERR_WRITE_MASK;
- if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
+ if (system)
+ access |= PFERR_IMPLICIT_ACCESS;
+ else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
access |= PFERR_USER_MASK;
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
@@ -6984,7 +6996,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
bool write)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
+ u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
| (write ? PFERR_WRITE_MASK : 0);
/*
@@ -7627,13 +7639,13 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
return;
}
-static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
- u32 msr_index, u64 *pdata)
+static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
+ u32 msr_index, u64 *pdata)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int r;
- r = kvm_get_msr(vcpu, msr_index, pdata);
+ r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
complete_emulated_rdmsr, r)) {
@@ -7644,13 +7656,13 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
return r;
}
-static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
- u32 msr_index, u64 data)
+static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
+ u32 msr_index, u64 data)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int r;
- r = kvm_set_msr(vcpu, msr_index, data);
+ r = kvm_set_msr_with_filter(vcpu, msr_index, data);
if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
complete_emulated_msr_access, r)) {
@@ -7661,6 +7673,18 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
return r;
}
+static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
+ u32 msr_index, u64 *pdata)
+{
+ return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
+}
+
+static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
+ u32 msr_index, u64 data)
+{
+ return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+}
+
static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
@@ -7724,6 +7748,11 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
}
+static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
+{
+ return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
+}
+
static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
@@ -7794,6 +7823,8 @@ static const struct x86_emulate_ops emulate_ops = {
.set_dr = emulator_set_dr,
.get_smbase = emulator_get_smbase,
.set_smbase = emulator_set_smbase,
+ .set_msr_with_filter = emulator_set_msr_with_filter,
+ .get_msr_with_filter = emulator_get_msr_with_filter,
.set_msr = emulator_set_msr,
.get_msr = emulator_get_msr,
.check_pmc = emulator_check_pmc,
@@ -7806,6 +7837,7 @@ static const struct x86_emulate_ops emulate_ops = {
.guest_has_long_mode = emulator_guest_has_long_mode,
.guest_has_movbe = emulator_guest_has_movbe,
.guest_has_fxsr = emulator_guest_has_fxsr,
+ .guest_has_rdpid = emulator_guest_has_rdpid,
.set_nmi_mask = emulator_set_nmi_mask,
.get_hflags = emulator_get_hflags,
.exiting_smm = emulator_exiting_smm,
@@ -9058,15 +9090,29 @@ bool kvm_apicv_activated(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_apicv_activated);
+
+static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
+ enum kvm_apicv_inhibit reason, bool set)
+{
+ if (set)
+ __set_bit(reason, inhibits);
+ else
+ __clear_bit(reason, inhibits);
+
+ trace_kvm_apicv_inhibit_changed(reason, set, *inhibits);
+}
+
static void kvm_apicv_init(struct kvm *kvm)
{
+ unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons;
+
init_rwsem(&kvm->arch.apicv_update_lock);
- set_bit(APICV_INHIBIT_REASON_ABSENT,
- &kvm->arch.apicv_inhibit_reasons);
+ set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true);
+
if (!enable_apicv)
- set_bit(APICV_INHIBIT_REASON_DISABLE,
- &kvm->arch.apicv_inhibit_reasons);
+ set_or_clear_apicv_inhibit(inhibits,
+ APICV_INHIBIT_REASON_DISABLE, true);
}
static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
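
With the old boolean "activate" polarity gone, call sites now read unambiguously. A usage sketch of the renamed helpers; kvm_clear_apicv_inhibit(), used in the ioctl hunks above, is the convenience wrapper for the "false" case:

	/* Inhibit APICv while the reason holds, then lift it. */
	kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, true);
	kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, false);

	/* Equivalent to the second call: */
	kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ);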
@@ -9740,24 +9786,21 @@ out:
}
EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
-void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+ enum kvm_apicv_inhibit reason, bool set)
{
unsigned long old, new;
lockdep_assert_held_write(&kvm->arch.apicv_update_lock);
- if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(bit))
+ if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason))
return;
old = new = kvm->arch.apicv_inhibit_reasons;
- if (activate)
- __clear_bit(bit, &new);
- else
- __set_bit(bit, &new);
+ set_or_clear_apicv_inhibit(&new, reason, set);
if (!!old != !!new) {
- trace_kvm_apicv_update_request(activate, bit);
/*
* Kick all vCPUs before setting apicv_inhibit_reasons to avoid
* false positives in the sanity check WARN in svm_vcpu_run().
@@ -9776,20 +9819,22 @@ void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
kvm_zap_gfn_range(kvm, gfn, gfn+1);
}
- } else
+ } else {
kvm->arch.apicv_inhibit_reasons = new;
+ }
}
-void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+ enum kvm_apicv_inhibit reason, bool set)
{
if (!enable_apicv)
return;
down_write(&kvm->arch.apicv_update_lock);
- __kvm_request_apicv_update(kvm, activate, bit);
+ __kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
up_write(&kvm->arch.apicv_update_lock);
}
-EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
+EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
@@ -10937,7 +10982,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
{
- bool inhibit = false;
+ bool set = false;
struct kvm_vcpu *vcpu;
unsigned long i;
@@ -10945,11 +10990,11 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
- inhibit = true;
+ set = true;
break;
}
}
- __kvm_request_apicv_update(kvm, !inhibit, APICV_INHIBIT_REASON_BLOCKIRQ);
+ __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
up_write(&kvm->arch.apicv_update_lock);
}
@@ -11557,10 +11602,8 @@ int kvm_arch_hardware_setup(void *opaque)
u64 max = min(0x7fffffffULL,
__scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
kvm_max_guest_tsc_khz = max;
-
- kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
}
-
+ kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
kvm_init_msr_list();
return 0;
}
@@ -11629,12 +11672,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
ret = kvm_page_track_init(kvm);
if (ret)
- return ret;
+ goto out;
+
+ ret = kvm_mmu_init_vm(kvm);
+ if (ret)
+ goto out_page_track;
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
- INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
- INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
- INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
atomic_set(&kvm->arch.noncoherent_dma_count, 0);
@@ -11666,10 +11710,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_apicv_init(kvm);
kvm_hv_init_vm(kvm);
- kvm_mmu_init_vm(kvm);
kvm_xen_init_vm(kvm);
return static_call(kvm_x86_vm_init)(kvm);
+
+out_page_track:
+ kvm_page_track_cleanup(kvm);
+out:
+ return ret;
}
int kvm_arch_post_init_vm(struct kvm *kvm)
@@ -12593,7 +12641,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
struct x86_exception fault;
- u32 access = error_code &
+ u64 access = error_code &
(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
if (!(error_code & PFERR_PRESENT_MASK) ||
@@ -12933,7 +12981,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
-EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 4aa0f2b31665..bf6cc25eee76 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -39,8 +39,8 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
}
do {
- ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
- gpa, PAGE_SIZE, false);
+ ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
+ gpa, PAGE_SIZE);
if (ret)
goto out;
@@ -1025,8 +1025,7 @@ static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm
break;
idx = srcu_read_lock(&kvm->srcu);
- rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa,
- PAGE_SIZE, false);
+ rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
srcu_read_unlock(&kvm->srcu, idx);
} while(!rc);
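
The cache's consumer is now declared once at init time rather than re-asserted with a pair of booleans on every call. A usage sketch under that API, KVM_GUEST_USES_PFN being the counterpart for caches the guest itself maps:

	ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
					gpa, PAGE_SIZE);

	/* ... later, revalidate after a potential invalidation ... */
	ret = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);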
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 1f8a8f895173..50734a23034c 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -93,7 +93,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
buff += 8;
}
if (len & 7) {
-#ifdef CONFIG_DCACHE_WORD_ACCESS
unsigned int shift = (8 - (len & 7)) * 8;
unsigned long trail;
@@ -103,31 +102,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [trail] "r" (trail));
-#else
- if (len & 4) {
- asm("addq %[val],%[res]\n\t"
- "adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [val] "r" ((u64)*(u32 *)buff)
- : "memory");
- buff += 4;
- }
- if (len & 2) {
- asm("addq %[val],%[res]\n\t"
- "adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [val] "r" ((u64)*(u16 *)buff)
- : "memory");
- buff += 2;
- }
- if (len & 1) {
- asm("addq %[val],%[res]\n\t"
- "adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [val] "r" ((u64)*(u8 *)buff)
- : "memory");
- }
-#endif
}
result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
if (unlikely(odd)) {
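
The deleted #else branch was the byte/word fallback for configurations without CONFIG_DCACHE_WORD_ACCESS; the arch/x86/um hunk that follows makes UML, the last such x86 configuration, select it, so the word-based path becomes unconditional. A worked example of the surviving trailing-bytes trick, which upstream feeds from load_unaligned_zeropad():

	/* For a 3-byte tail: shift = (8 - 3) * 8 = 40, so the 8-byte
	 * zero-padded load is masked down to its low 3 bytes, i.e. (on
	 * little-endian) the first 3 bytes of the buffer. */
	unsigned int shift = (8 - (len & 7)) * 8;
	unsigned long trail = (load_unaligned_zeropad(buff) << shift) >> shift;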
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index ead7e5b3a975..1bcd42c53039 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -9,6 +9,7 @@ endmenu
config UML_X86
def_bool y
select ARCH_BINFMT_ELF_EXTRA_PHDRS if X86_32
+ select DCACHE_WORD_ACCESS
config 64BIT
bool "64-bit kernel" if "$(SUBARCH)" = "x86"
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0430926426fe..8dfe62786cd5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -65,19 +65,12 @@ static bool blkcg_policy_enabled(struct request_queue *q,
return pol && test_bit(pol->plid, q->blkcg_pols);
}
-/**
- * blkg_free - free a blkg
- * @blkg: blkg to free
- *
- * Free @blkg which may be partially allocated.
- */
-static void blkg_free(struct blkcg_gq *blkg)
+static void blkg_free_workfn(struct work_struct *work)
{
+ struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+ free_work);
int i;
- if (!blkg)
- return;
-
for (i = 0; i < BLKCG_MAX_POLS; i++)
if (blkg->pd[i])
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
@@ -89,6 +82,25 @@ static void blkg_free(struct blkcg_gq *blkg)
kfree(blkg);
}
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
+{
+ if (!blkg)
+ return;
+
+ /*
+ * Both ->pd_free_fn() and request queue's release handler may
+ * sleep, so free us by scheduling one work func
+ */
+ INIT_WORK(&blkg->free_work, blkg_free_workfn);
+ schedule_work(&blkg->free_work);
+}
+
static void __blkg_release(struct rcu_head *rcu)
{
struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
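
The motivation is visible in the signature kept just above: __blkg_release() runs as an RCU callback, where sleeping is forbidden, while ->pd_free_fn() and the request queue's release handler may sleep, so the free is bounced to process context. The same idiom in generic form, with hypothetical names:

	struct foo {
		struct work_struct free_work;
		/* ... */
	};

	static void foo_free_workfn(struct work_struct *work)
	{
		struct foo *foo = container_of(work, struct foo, free_work);

		/* Teardown that may sleep is safe in worker context. */
		kfree(foo);
	}

	static void foo_free(struct foo *foo)	/* safe from atomic context */
	{
		INIT_WORK(&foo->free_work, foo_free_workfn);
		schedule_work(&foo->free_work);
	}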
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 11f49f78db32..df9cfe4ca532 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -280,7 +280,6 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
task_lock(task);
if (task->flags & PF_EXITING) {
- err = -ESRCH;
kmem_cache_free(iocontext_cachep, ioc);
goto out;
}
@@ -292,7 +291,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
task->io_context->ioprio = ioprio;
out:
task_unlock(task);
- return err;
+ return 0;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e6f24fa4a4c2..ed3ed86f7dd2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4462,21 +4462,28 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
return true;
}
-static void blk_mq_elv_switch_back(struct list_head *head,
- struct request_queue *q)
+static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
+ struct request_queue *q)
{
struct blk_mq_qe_pair *qe;
- struct elevator_type *t = NULL;
list_for_each_entry(qe, head, node)
- if (qe->q == q) {
- t = qe->type;
- break;
- }
+ if (qe->q == q)
+ return qe;
- if (!t)
- return;
+ return NULL;
+}
+static void blk_mq_elv_switch_back(struct list_head *head,
+ struct request_queue *q)
+{
+ struct blk_mq_qe_pair *qe;
+ struct elevator_type *t;
+
+ qe = blk_lookup_qe_pair(head, q);
+ if (!qe)
+ return;
+ t = qe->type;
list_del(&qe->node);
kfree(qe);
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 2eb01becde8c..7e44eccc676d 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -101,9 +101,6 @@ u64 wbt_default_latency_nsec(struct request_queue *);
#else
-static inline void wbt_track(struct request *rq, enum wbt_flags flags)
-{
-}
static inline int wbt_init(struct request_queue *q)
{
return -EINVAL;
diff --git a/block/genhd.c b/block/genhd.c
index c9a4fc90d3e9..b8b6759d670f 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -335,7 +335,7 @@ int blk_alloc_ext_minor(void)
{
int idx;
- idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
+ idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
if (idx == -ENOSPC)
return -EBUSY;
return idx;
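
The one-character fix hinges on ida_alloc_range() treating its max argument as inclusive: passing NR_EXT_DEVT permitted one more minor number than intended. With the fix:

	/* Allocates IDs in [0, NR_EXT_DEVT - 1]: exactly NR_EXT_DEVT values. */
	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);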
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 96881d5babd9..9676a1d214bc 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
unsigned int set_size)
{
struct drbd_request *r;
- struct drbd_request *req = NULL;
+ struct drbd_request *req = NULL, *tmp = NULL;
int expect_epoch = 0;
int expect_size = 0;
@@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
list_for_each_entry(req, &connection->transfer_log, tl_requests)
- if (req->epoch == expect_epoch)
+ if (req->epoch == expect_epoch) {
+ tmp = req;
break;
+ }
+ req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
if (req->epoch != expect_epoch)
break;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index c04394518b07..75be0e16770a 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -180,7 +180,8 @@ void start_new_tl_epoch(struct drbd_connection *connection)
void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m)
{
- m->bio->bi_status = errno_to_blk_status(m->error);
+ if (unlikely(m->error))
+ m->bio->bi_status = errno_to_blk_status(m->error);
bio_endio(m->bio);
dec_ap_bio(device);
}
@@ -332,17 +333,21 @@ static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct dr
static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+ struct drbd_request *iter = req;
if (!connection)
return;
if (connection->req_next != req)
return;
- list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
- const unsigned s = req->rq_state;
- if (s & RQ_NET_QUEUED)
+
+ req = NULL;
+ list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+ const unsigned int s = iter->rq_state;
+
+ if (s & RQ_NET_QUEUED) {
+ req = iter;
break;
+ }
}
- if (&req->tl_requests == &connection->transfer_log)
- req = NULL;
connection->req_next = req;
}
@@ -358,17 +363,21 @@ static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, st
static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+ struct drbd_request *iter = req;
if (!connection)
return;
if (connection->req_ack_pending != req)
return;
- list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
- const unsigned s = req->rq_state;
- if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
+
+ req = NULL;
+ list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+ const unsigned int s = iter->rq_state;
+
+ if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) {
+ req = iter;
break;
+ }
}
- if (&req->tl_requests == &connection->transfer_log)
- req = NULL;
connection->req_ack_pending = req;
}
@@ -384,17 +393,21 @@ static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, s
static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+ struct drbd_request *iter = req;
if (!connection)
return;
if (connection->req_not_net_done != req)
return;
- list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
- const unsigned s = req->rq_state;
- if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
+
+ req = NULL;
+ list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+ const unsigned int s = iter->rq_state;
+
+ if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) {
+ req = iter;
break;
+ }
}
- if (&req->tl_requests == &connection->transfer_log)
- req = NULL;
connection->req_not_net_done = req;
}
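
All three hunks apply the same hardening: when list_for_each_entry_continue() runs to completion without a break, the cursor aliases a bogus entry computed from the list head and must not be dereferenced afterwards. The safe shape, sketched generically ("start" and the predicate are placeholders):

	struct drbd_request *iter = start, *found = NULL;

	list_for_each_entry_continue(iter, &connection->transfer_log,
				     tl_requests) {
		if (matches(iter)) {	/* hypothetical predicate */
			found = iter;
			break;
		}
	}
	/* 'found' is NULL iff the walk fell off the end of the list. */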
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3e636a75c83a..a58595f5ee2c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1591,6 +1591,7 @@ struct compat_loop_info {
compat_ulong_t lo_inode; /* ioctl r/o */
compat_dev_t lo_rdevice; /* ioctl r/o */
compat_int_t lo_offset;
+ compat_int_t lo_encrypt_type; /* obsolete, ignored */
compat_int_t lo_encrypt_key_size; /* ioctl w/o */
compat_int_t lo_flags; /* ioctl r/o */
char lo_name[LO_NAME_SIZE];
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index 4db9a8c244af..e094d2b8b5a9 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -88,7 +88,7 @@ static void n64cart_submit_bio(struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
- struct device *dev = bio->bi_disk->private_data;
+ struct device *dev = bio->bi_bdev->bd_disk->private_data;
u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
bio_for_each_segment(bvec, bio, iter) {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index d1e26461a64e..de42458195bc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -931,7 +931,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
if (rc)
goto unmap;
- for (n = 0, i = 0; n < nseg; n++) {
+ for (n = 0; n < nseg; n++) {
uint8_t first_sect, last_sect;
if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 378262ec47ae..003056d4f7f5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -576,7 +576,7 @@ struct setup_rw_req {
struct blkif_request *ring_req;
grant_ref_t gref_head;
unsigned int id;
- /* Only used when persistent grant is used and it's a read request */
+ /* Only used when persistent grant is used and it's a write request */
bool need_copy;
unsigned int bvec_off;
char *bvec_data;
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 68a94e5af8ed..461537679c04 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -69,6 +69,11 @@ config SUN6I_A31_CCU
default MACH_SUN6I
depends on MACH_SUN6I || COMPILE_TEST
+config SUN6I_RTC_CCU
+ tristate "Support for the Allwinner H616/R329 RTC CCU"
+ default ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
+
config SUN8I_A23_CCU
tristate "Support for the Allwinner A23 CCU"
default MACH_SUN8I
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index ec931cb7aa14..6b3ae2b620db 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
obj-$(CONFIG_SUN4I_A10_CCU) += sun4i-a10-ccu.o
obj-$(CONFIG_SUN5I_CCU) += sun5i-ccu.o
obj-$(CONFIG_SUN6I_A31_CCU) += sun6i-a31-ccu.o
+obj-$(CONFIG_SUN6I_RTC_CCU) += sun6i-rtc-ccu.o
obj-$(CONFIG_SUN8I_A23_CCU) += sun8i-a23-ccu.o
obj-$(CONFIG_SUN8I_A33_CCU) += sun8i-a33-ccu.o
obj-$(CONFIG_SUN8I_A83T_CCU) += sun8i-a83t-ccu.o
@@ -60,6 +61,7 @@ sun50i-h616-ccu-y += ccu-sun50i-h616.o
sun4i-a10-ccu-y += ccu-sun4i-a10.o
sun5i-ccu-y += ccu-sun5i.o
sun6i-a31-ccu-y += ccu-sun6i-a31.o
+sun6i-rtc-ccu-y += ccu-sun6i-rtc.o
sun8i-a23-ccu-y += ccu-sun8i-a23.o
sun8i-a33-ccu-y += ccu-sun8i-a33.o
sun8i-a83t-ccu-y += ccu-sun8i-a83t.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
new file mode 100644
index 000000000000..8a10bade7e0d
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 Samuel Holland <samuel@sholland.org>
+//
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <linux/clk/sunxi-ng.h>
+
+#include "ccu_common.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mux.h"
+
+#include "ccu-sun6i-rtc.h"
+
+#define IOSC_ACCURACY 300000000 /* 30% */
+#define IOSC_RATE 16000000
+
+#define LOSC_RATE 32768
+#define LOSC_RATE_SHIFT 15
+
+#define LOSC_CTRL_REG 0x0
+#define LOSC_CTRL_KEY 0x16aa0000
+
+#define IOSC_32K_CLK_DIV_REG 0x8
+#define IOSC_32K_CLK_DIV GENMASK(4, 0)
+#define IOSC_32K_PRE_DIV 32
+
+#define IOSC_CLK_CALI_REG 0xc
+#define IOSC_CLK_CALI_DIV_ONES 22
+#define IOSC_CLK_CALI_EN BIT(1)
+#define IOSC_CLK_CALI_SRC_SEL BIT(0)
+
+#define LOSC_OUT_GATING_REG 0x60
+
+#define DCXO_CTRL_REG 0x160
+#define DCXO_CTRL_CLK16M_RC_EN BIT(0)
+
+struct sun6i_rtc_match_data {
+ bool have_ext_osc32k : 1;
+ bool have_iosc_calibration : 1;
+ bool rtc_32k_single_parent : 1;
+ const struct clk_parent_data *osc32k_fanout_parents;
+ u8 osc32k_fanout_nparents;
+};
+
+static bool have_iosc_calibration;
+
+static int ccu_iosc_enable(struct clk_hw *hw)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+
+ return ccu_gate_helper_enable(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static void ccu_iosc_disable(struct clk_hw *hw)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+
+ return ccu_gate_helper_disable(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static int ccu_iosc_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+
+ return ccu_gate_helper_is_enabled(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static unsigned long ccu_iosc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+
+ if (have_iosc_calibration) {
+ u32 reg = readl(cm->base + IOSC_CLK_CALI_REG);
+
+ /*
+ * Recover the IOSC frequency by shifting the ones place of
+ * (fixed-point divider * 32768) into bit zero.
+ */
+ if (reg & IOSC_CLK_CALI_EN)
+ return reg >> (IOSC_CLK_CALI_DIV_ONES - LOSC_RATE_SHIFT);
+ }
+
+ return IOSC_RATE;
+}
+
+static unsigned long ccu_iosc_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_accuracy)
+{
+ return IOSC_ACCURACY;
+}
+
+static const struct clk_ops ccu_iosc_ops = {
+ .enable = ccu_iosc_enable,
+ .disable = ccu_iosc_disable,
+ .is_enabled = ccu_iosc_is_enabled,
+ .recalc_rate = ccu_iosc_recalc_rate,
+ .recalc_accuracy = ccu_iosc_recalc_accuracy,
+};
+
+static struct ccu_common iosc_clk = {
+ .reg = DCXO_CTRL_REG,
+ .hw.init = CLK_HW_INIT_NO_PARENT("iosc", &ccu_iosc_ops,
+ CLK_GET_RATE_NOCACHE),
+};
+
+static int ccu_iosc_32k_prepare(struct clk_hw *hw)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ u32 val;
+
+ if (!have_iosc_calibration)
+ return 0;
+
+ val = readl(cm->base + IOSC_CLK_CALI_REG);
+ writel(val | IOSC_CLK_CALI_EN | IOSC_CLK_CALI_SRC_SEL,
+ cm->base + IOSC_CLK_CALI_REG);
+
+ return 0;
+}
+
+static void ccu_iosc_32k_unprepare(struct clk_hw *hw)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ u32 val;
+
+ if (!have_iosc_calibration)
+ return;
+
+ val = readl(cm->base + IOSC_CLK_CALI_REG);
+ writel(val & ~(IOSC_CLK_CALI_EN | IOSC_CLK_CALI_SRC_SEL),
+ cm->base + IOSC_CLK_CALI_REG);
+}
+
+static unsigned long ccu_iosc_32k_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ u32 val;
+
+ if (have_iosc_calibration) {
+ val = readl(cm->base + IOSC_CLK_CALI_REG);
+
+ /* Assume the calibrated 32k clock is accurate. */
+ if (val & IOSC_CLK_CALI_SRC_SEL)
+ return LOSC_RATE;
+ }
+
+ val = readl(cm->base + IOSC_32K_CLK_DIV_REG) & IOSC_32K_CLK_DIV;
+
+ return parent_rate / IOSC_32K_PRE_DIV / (val + 1);
+}
+
+static unsigned long ccu_iosc_32k_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_accuracy)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ u32 val;
+
+ if (have_iosc_calibration) {
+ val = readl(cm->base + IOSC_CLK_CALI_REG);
+
+ /* Assume the calibrated 32k clock is accurate. */
+ if (val & IOSC_CLK_CALI_SRC_SEL)
+ return 0;
+ }
+
+ return parent_accuracy;
+}
+
+static const struct clk_ops ccu_iosc_32k_ops = {
+ .prepare = ccu_iosc_32k_prepare,
+ .unprepare = ccu_iosc_32k_unprepare,
+ .recalc_rate = ccu_iosc_32k_recalc_rate,
+ .recalc_accuracy = ccu_iosc_32k_recalc_accuracy,
+};
+
+static struct ccu_common iosc_32k_clk = {
+ .hw.init = CLK_HW_INIT_HW("iosc-32k", &iosc_clk.hw,
+ &ccu_iosc_32k_ops,
+ CLK_GET_RATE_NOCACHE),
+};
+
+static const struct clk_hw *ext_osc32k[] = { NULL }; /* updated during probe */
+
+static SUNXI_CCU_GATE_HWS(ext_osc32k_gate_clk, "ext-osc32k-gate",
+ ext_osc32k, 0x0, BIT(4), 0);
+
+static const struct clk_hw *osc32k_parents[] = {
+ &iosc_32k_clk.hw,
+ &ext_osc32k_gate_clk.common.hw
+};
+
+static struct clk_init_data osc32k_init_data = {
+ .name = "osc32k",
+ .ops = &ccu_mux_ops,
+ .parent_hws = osc32k_parents,
+ .num_parents = ARRAY_SIZE(osc32k_parents), /* updated during probe */
+};
+
+static struct ccu_mux osc32k_clk = {
+ .mux = _SUNXI_CCU_MUX(0, 1),
+ .common = {
+ .reg = LOSC_CTRL_REG,
+ .features = CCU_FEATURE_KEY_FIELD,
+ .hw.init = &osc32k_init_data,
+ },
+};
+
+/* This falls back to the global name for fwnodes without a named reference. */
+static const struct clk_parent_data osc24M[] = {
+ { .fw_name = "hosc", .name = "osc24M" }
+};
+
+static struct ccu_gate osc24M_32k_clk = {
+ .enable = BIT(16),
+ .common = {
+ .reg = LOSC_OUT_GATING_REG,
+ .prediv = 750,
+ .features = CCU_FEATURE_ALL_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("osc24M-32k", osc24M,
+ &ccu_gate_ops, 0),
+ },
+};
+
+static const struct clk_hw *rtc_32k_parents[] = {
+ &osc32k_clk.common.hw,
+ &osc24M_32k_clk.common.hw
+};
+
+static struct clk_init_data rtc_32k_init_data = {
+ .name = "rtc-32k",
+ .ops = &ccu_mux_ops,
+ .parent_hws = rtc_32k_parents,
+ .num_parents = ARRAY_SIZE(rtc_32k_parents), /* updated during probe */
+};
+
+static struct ccu_mux rtc_32k_clk = {
+ .mux = _SUNXI_CCU_MUX(1, 1),
+ .common = {
+ .reg = LOSC_CTRL_REG,
+ .features = CCU_FEATURE_KEY_FIELD,
+ .hw.init = &rtc_32k_init_data,
+ },
+};
+
+static struct clk_init_data osc32k_fanout_init_data = {
+ .name = "osc32k-fanout",
+ .ops = &ccu_mux_ops,
+ /* parents are set during probe */
+};
+
+static struct ccu_mux osc32k_fanout_clk = {
+ .enable = BIT(0),
+ .mux = _SUNXI_CCU_MUX(1, 2),
+ .common = {
+ .reg = LOSC_OUT_GATING_REG,
+ .hw.init = &osc32k_fanout_init_data,
+ },
+};
+
+static struct ccu_common *sun6i_rtc_ccu_clks[] = {
+ &iosc_clk,
+ &iosc_32k_clk,
+ &ext_osc32k_gate_clk.common,
+ &osc32k_clk.common,
+ &osc24M_32k_clk.common,
+ &rtc_32k_clk.common,
+ &osc32k_fanout_clk.common,
+};
+
+static struct clk_hw_onecell_data sun6i_rtc_ccu_hw_clks = {
+ .num = CLK_NUMBER,
+ .hws = {
+ [CLK_OSC32K] = &osc32k_clk.common.hw,
+ [CLK_OSC32K_FANOUT] = &osc32k_fanout_clk.common.hw,
+ [CLK_IOSC] = &iosc_clk.hw,
+ [CLK_IOSC_32K] = &iosc_32k_clk.hw,
+ [CLK_EXT_OSC32K_GATE] = &ext_osc32k_gate_clk.common.hw,
+ [CLK_OSC24M_32K] = &osc24M_32k_clk.common.hw,
+ [CLK_RTC_32K] = &rtc_32k_clk.common.hw,
+ },
+};
+
+static const struct sunxi_ccu_desc sun6i_rtc_ccu_desc = {
+ .ccu_clks = sun6i_rtc_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun6i_rtc_ccu_clks),
+
+ .hw_clks = &sun6i_rtc_ccu_hw_clks,
+};
+
+static const struct clk_parent_data sun50i_h6_osc32k_fanout_parents[] = {
+ { .hw = &osc32k_clk.common.hw },
+};
+
+static const struct clk_parent_data sun50i_h616_osc32k_fanout_parents[] = {
+ { .hw = &osc32k_clk.common.hw },
+ { .fw_name = "pll-32k" },
+ { .hw = &osc24M_32k_clk.common.hw }
+};
+
+static const struct clk_parent_data sun50i_r329_osc32k_fanout_parents[] = {
+ { .hw = &osc32k_clk.common.hw },
+ { .hw = &ext_osc32k_gate_clk.common.hw },
+ { .hw = &osc24M_32k_clk.common.hw }
+};
+
+static const struct sun6i_rtc_match_data sun50i_h6_rtc_ccu_data = {
+ .have_ext_osc32k = true,
+ .have_iosc_calibration = true,
+ .osc32k_fanout_parents = sun50i_h6_osc32k_fanout_parents,
+ .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h6_osc32k_fanout_parents),
+};
+
+static const struct sun6i_rtc_match_data sun50i_h616_rtc_ccu_data = {
+ .have_iosc_calibration = true,
+ .rtc_32k_single_parent = true,
+ .osc32k_fanout_parents = sun50i_h616_osc32k_fanout_parents,
+ .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h616_osc32k_fanout_parents),
+};
+
+static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = {
+ .have_ext_osc32k = true,
+ .osc32k_fanout_parents = sun50i_r329_osc32k_fanout_parents,
+ .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
+};
+
+static const struct of_device_id sun6i_rtc_ccu_match[] = {
+ {
+ .compatible = "allwinner,sun50i-h6-rtc",
+ .data = &sun50i_h6_rtc_ccu_data,
+ },
+ {
+ .compatible = "allwinner,sun50i-h616-rtc",
+ .data = &sun50i_h616_rtc_ccu_data,
+ },
+ {
+ .compatible = "allwinner,sun50i-r329-rtc",
+ .data = &sun50i_r329_rtc_ccu_data,
+ },
+ { /* sentinel */ },
+};
+
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
+{
+ const struct sun6i_rtc_match_data *data;
+ struct clk *ext_osc32k_clk = NULL;
+ const struct of_device_id *match;
+
+ /* This driver is only used for newer variants of the hardware. */
+ match = of_match_device(sun6i_rtc_ccu_match, dev);
+ if (!match)
+ return 0;
+
+ data = match->data;
+ have_iosc_calibration = data->have_iosc_calibration;
+
+ if (data->have_ext_osc32k) {
+ const char *fw_name;
+
+ /* ext-osc32k was the only input clock in the old binding. */
+ fw_name = of_property_read_bool(dev->of_node, "clock-names")
+ ? "ext-osc32k" : NULL;
+ ext_osc32k_clk = devm_clk_get_optional(dev, fw_name);
+ if (IS_ERR(ext_osc32k_clk))
+ return PTR_ERR(ext_osc32k_clk);
+ }
+
+ if (ext_osc32k_clk) {
+ /* Link ext-osc32k-gate to its parent. */
+ *ext_osc32k = __clk_get_hw(ext_osc32k_clk);
+ } else {
+ /* ext-osc32k-gate is an orphan, so do not register it. */
+ sun6i_rtc_ccu_hw_clks.hws[CLK_EXT_OSC32K_GATE] = NULL;
+ osc32k_init_data.num_parents = 1;
+ }
+
+ if (data->rtc_32k_single_parent)
+ rtc_32k_init_data.num_parents = 1;
+
+ osc32k_fanout_init_data.parent_data = data->osc32k_fanout_parents;
+ osc32k_fanout_init_data.num_parents = data->osc32k_fanout_nparents;
+
+ return devm_sunxi_ccu_probe(dev, reg, &sun6i_rtc_ccu_desc);
+}
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
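
The least obvious piece of this file is the rate-recovery shift in ccu_iosc_recalc_rate(): the calibration register holds frequency/32768 in fixed point with its ones bit at position IOSC_CLK_CALI_DIV_ONES (22), so dividing out the 2^22 scale and multiplying back by 32768 = 2^15 collapses into a single right shift by 22 - 15 = 7. Worked example:

	/* A 16 MHz IOSC is stored as (16000000 / 32768) * 2^22
	 * = 16000000 << 7 = 2048000000, which still fits in a u32;
	 * reg >> (IOSC_CLK_CALI_DIV_ONES - LOSC_RATE_SHIFT), i.e.
	 * reg >> 7, recovers 16000000 Hz. */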
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h
new file mode 100644
index 000000000000..9ae821fc2599
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CCU_SUN6I_RTC_H
+#define _CCU_SUN6I_RTC_H
+
+#include <dt-bindings/clock/sun6i-rtc.h>
+
+#define CLK_IOSC_32K 3
+#define CLK_EXT_OSC32K_GATE 4
+#define CLK_OSC24M_32K 5
+#define CLK_RTC_32K 6
+
+#define CLK_NUMBER (CLK_RTC_32K + 1)
+
+#endif /* _CCU_SUN6I_RTC_H */
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index 98a1834b58bb..fbf16c6b896d 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -17,6 +17,7 @@
#define CCU_FEATURE_LOCK_REG BIT(5)
#define CCU_FEATURE_MMC_TIMING_SWITCH BIT(6)
#define CCU_FEATURE_SIGMA_DELTA_MOD BIT(7)
+#define CCU_FEATURE_KEY_FIELD BIT(8)
/* MMC timing mode switch bit */
#define CCU_MMC_NEW_TIMING_MODE BIT(30)
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index 2306a1cd83e4..1d557e323169 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -12,6 +12,8 @@
#include "ccu_gate.h"
#include "ccu_mux.h"
+#define CCU_MUX_KEY_VALUE 0x16aa0000
+
static u16 ccu_mux_get_prediv(struct ccu_common *common,
struct ccu_mux_internal *cm,
int parent_index)
@@ -191,6 +193,11 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
spin_lock_irqsave(common->lock, flags);
reg = readl(common->base + common->reg);
+
+ /* The key field always reads as zero. */
+ if (common->features & CCU_FEATURE_KEY_FIELD)
+ reg |= CCU_MUX_KEY_VALUE;
+
reg &= ~GENMASK(cm->width + cm->shift - 1, cm->shift);
writel(reg | (index << cm->shift), common->base + common->reg);
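
The magic value matches LOSC_CTRL_KEY in the new RTC CCU driver: writes to the keyed register take effect only when 0x16aa sits in the upper half-word, yet that field always reads back as zero, so every read-modify-write must re-insert it. A hypothetical helper making the idiom explicit:

	static void keyed_update_bits(void __iomem *reg, u32 mask, u32 val)
	{
		u32 tmp = readl(reg) | CCU_MUX_KEY_VALUE; /* key reads as 0 */

		writel((tmp & ~mask) | val, reg);
	}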
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index c0aeedd66f02..ff71dd662880 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -47,6 +47,10 @@ config CPU_IDLE_GOV_HALTPOLL
config DT_IDLE_STATES
bool
+config DT_IDLE_GENPD
+ depends on PM_GENERIC_DOMAINS_OF
+ bool
+
menu "ARM CPU Idle Drivers"
depends on ARM || ARM64
source "drivers/cpuidle/Kconfig.arm"
@@ -62,6 +66,11 @@ depends on PPC
source "drivers/cpuidle/Kconfig.powerpc"
endmenu
+menu "RISC-V CPU Idle Drivers"
+depends on RISCV
+source "drivers/cpuidle/Kconfig.riscv"
+endmenu
+
config HALTPOLL_CPUIDLE
tristate "Halt poll cpuidle driver"
depends on X86 && KVM_GUEST
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 15d6c46c0a47..be7f512109f7 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -27,6 +27,7 @@ config ARM_PSCI_CPUIDLE_DOMAIN
bool "PSCI CPU idle Domain"
depends on ARM_PSCI_CPUIDLE
depends on PM_GENERIC_DOMAINS_OF
+ select DT_IDLE_GENPD
default y
help
Select this to enable the PSCI based CPUidle driver to use PM domains,
diff --git a/drivers/cpuidle/Kconfig.riscv b/drivers/cpuidle/Kconfig.riscv
new file mode 100644
index 000000000000..78518c26af74
--- /dev/null
+++ b/drivers/cpuidle/Kconfig.riscv
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RISC-V CPU Idle drivers
+#
+
+config RISCV_SBI_CPUIDLE
+ bool "RISC-V SBI CPU idle Driver"
+ depends on RISCV_SBI
+ select DT_IDLE_STATES
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ select DT_IDLE_GENPD if PM_GENERIC_DOMAINS_OF
+ help
+ Select this option to enable RISC-V SBI firmware based CPU idle
+	  driver for RISC-V systems. This driver also supports a hierarchical
+	  DT-based layout of the idle states.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 26bbc5e74123..d103342b7cfc 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -6,6 +6,7 @@
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
+obj-$(CONFIG_DT_IDLE_GENPD) += dt_idle_genpd.o
obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o
obj-$(CONFIG_HALTPOLL_CPUIDLE) += cpuidle-haltpoll.o
@@ -34,3 +35,7 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o
# POWERPC drivers
obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
+
+###############################################################################
+# RISC-V drivers
+obj-$(CONFIG_RISCV_SBI_CPUIDLE) += cpuidle-riscv-sbi.o
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index ff2c3f8e4668..755bbdfc5b82 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -47,73 +47,14 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
return 0;
}
-static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
- int state_count)
-{
- int i, ret;
- u32 psci_state, *psci_state_buf;
-
- for (i = 0; i < state_count; i++) {
- ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
- &psci_state);
- if (ret)
- goto free_state;
-
- psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
- if (!psci_state_buf) {
- ret = -ENOMEM;
- goto free_state;
- }
- *psci_state_buf = psci_state;
- states[i].data = psci_state_buf;
- }
-
- return 0;
-
-free_state:
- i--;
- for (; i >= 0; i--)
- kfree(states[i].data);
- return ret;
-}
-
-static int psci_pd_parse_states(struct device_node *np,
- struct genpd_power_state **states, int *state_count)
-{
- int ret;
-
- /* Parse the domain idle states. */
- ret = of_genpd_parse_idle_states(np, states, state_count);
- if (ret)
- return ret;
-
- /* Fill out the PSCI specifics for each found state. */
- ret = psci_pd_parse_state_nodes(*states, *state_count);
- if (ret)
- kfree(*states);
-
- return ret;
-}
-
-static void psci_pd_free_states(struct genpd_power_state *states,
- unsigned int state_count)
-{
- int i;
-
- for (i = 0; i < state_count; i++)
- kfree(states[i].data);
- kfree(states);
-}
-
static int psci_pd_init(struct device_node *np, bool use_osi)
{
struct generic_pm_domain *pd;
struct psci_pd_provider *pd_provider;
struct dev_power_governor *pd_gov;
- struct genpd_power_state *states = NULL;
int ret = -ENOMEM, state_count = 0;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ pd = dt_idle_pd_alloc(np, psci_dt_parse_state_node);
if (!pd)
goto out;
@@ -121,22 +62,6 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
if (!pd_provider)
goto free_pd;
- pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
- if (!pd->name)
- goto free_pd_prov;
-
- /*
- * Parse the domain idle states and let genpd manage the state selection
- * for those being compatible with "domain-idle-state".
- */
- ret = psci_pd_parse_states(np, &states, &state_count);
- if (ret)
- goto free_name;
-
- pd->free_states = psci_pd_free_states;
- pd->name = kbasename(pd->name);
- pd->states = states;
- pd->state_count = state_count;
pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
/* Allow power off when OSI has been successfully enabled. */
@@ -149,10 +74,8 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
ret = pm_genpd_init(pd, pd_gov, false);
- if (ret) {
- psci_pd_free_states(states, state_count);
- goto free_name;
- }
+ if (ret)
+ goto free_pd_prov;
ret = of_genpd_add_provider_simple(np, pd);
if (ret)
@@ -166,12 +89,10 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
remove_pd:
pm_genpd_remove(pd);
-free_name:
- kfree(pd->name);
free_pd_prov:
kfree(pd_provider);
free_pd:
- kfree(pd);
+ dt_idle_pd_free(pd);
out:
pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
return ret;
@@ -195,30 +116,6 @@ static void psci_pd_remove(void)
}
}
-static int psci_pd_init_topology(struct device_node *np)
-{
- struct device_node *node;
- struct of_phandle_args child, parent;
- int ret;
-
- for_each_child_of_node(np, node) {
- if (of_parse_phandle_with_args(node, "power-domains",
- "#power-domain-cells", 0, &parent))
- continue;
-
- child.np = node;
- child.args_count = 0;
- ret = of_genpd_add_subdomain(&parent, &child);
- of_node_put(parent.np);
- if (ret) {
- of_node_put(node);
- return ret;
- }
- }
-
- return 0;
-}
-
static bool psci_pd_try_set_osi_mode(void)
{
int ret;
@@ -282,7 +179,7 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
goto no_pd;
/* Link genpd masters/subdomains to model the CPU topology. */
- ret = psci_pd_init_topology(np);
+ ret = dt_idle_pd_init_topology(np);
if (ret)
goto remove_pd;
@@ -314,28 +211,3 @@ static int __init psci_idle_init_domains(void)
return platform_driver_register(&psci_cpuidle_domain_driver);
}
subsys_initcall(psci_idle_init_domains);
-
-struct device *psci_dt_attach_cpu(int cpu)
-{
- struct device *dev;
-
- dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
- if (IS_ERR_OR_NULL(dev))
- return dev;
-
- pm_runtime_irq_safe(dev);
- if (cpu_online(cpu))
- pm_runtime_get_sync(dev);
-
- dev_pm_syscore_device(dev, true);
-
- return dev;
-}
-
-void psci_dt_detach_cpu(struct device *dev)
-{
- if (IS_ERR_OR_NULL(dev))
- return;
-
- dev_pm_domain_detach(dev, false);
-}
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
index d8e925e84c27..4e132640ed64 100644
--- a/drivers/cpuidle/cpuidle-psci.h
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -10,8 +10,19 @@ void psci_set_domain_state(u32 state);
int psci_dt_parse_state_node(struct device_node *np, u32 *state);
#ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN
-struct device *psci_dt_attach_cpu(int cpu);
-void psci_dt_detach_cpu(struct device *dev);
+
+#include "dt_idle_genpd.h"
+
+static inline struct device *psci_dt_attach_cpu(int cpu)
+{
+ return dt_idle_attach_cpu(cpu, "psci");
+}
+
+static inline void psci_dt_detach_cpu(struct device *dev)
+{
+ dt_idle_detach_cpu(dev);
+}
+
#else
static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; }
static inline void psci_dt_detach_cpu(struct device *dev) { }
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
new file mode 100644
index 000000000000..b459eda2cd37
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RISC-V SBI CPU idle driver.
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu_cooling.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <asm/cpuidle.h>
+#include <asm/sbi.h>
+#include <asm/suspend.h>
+
+#include "dt_idle_states.h"
+#include "dt_idle_genpd.h"
+
+struct sbi_cpuidle_data {
+ u32 *states;
+ struct device *dev;
+};
+
+struct sbi_domain_state {
+ bool available;
+ u32 state;
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
+static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
+static bool sbi_cpuidle_use_osi;
+static bool sbi_cpuidle_use_cpuhp;
+static bool sbi_cpuidle_pd_allow_domain_state;
+
+static inline void sbi_set_domain_state(u32 state)
+{
+ struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+ data->available = true;
+ data->state = state;
+}
+
+static inline u32 sbi_get_domain_state(void)
+{
+ struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+ return data->state;
+}
+
+static inline void sbi_clear_domain_state(void)
+{
+ struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+ data->available = false;
+}
+
+static inline bool sbi_is_domain_state_available(void)
+{
+ struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+ return data->available;
+}
+
+static int sbi_suspend_finisher(unsigned long suspend_type,
+ unsigned long resume_addr,
+ unsigned long opaque)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
+ suspend_type, resume_addr, opaque, 0, 0, 0);
+
+ return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
+}
+
+static int sbi_suspend(u32 state)
+{
+ if (state & SBI_HSM_SUSP_NON_RET_BIT)
+ return cpu_suspend(state, sbi_suspend_finisher);
+ else
+ return sbi_suspend_finisher(state, 0, 0);
+}
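+
+/*
+ * Note: suspend types with SBI_HSM_SUSP_NON_RET_BIT set do not retain
+ * register state across the suspend, so they must go through
+ * cpu_suspend(), which saves context and supplies a resume address to
+ * the SBI HSM call; retentive types simply return from the ecall with
+ * state intact.
+ */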
+
+static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
+
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
+}
+
+static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx,
+ bool s2idle)
+{
+ struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
+ u32 *states = data->states;
+ struct device *pd_dev = data->dev;
+ u32 state;
+ int ret;
+
+ ret = cpu_pm_enter();
+ if (ret)
+ return -1;
+
+ /* Do runtime PM to manage a hierarchical CPU topology. */
+ rcu_irq_enter_irqson();
+ if (s2idle)
+ dev_pm_genpd_suspend(pd_dev);
+ else
+ pm_runtime_put_sync_suspend(pd_dev);
+ rcu_irq_exit_irqson();
+
+ if (sbi_is_domain_state_available())
+ state = sbi_get_domain_state();
+ else
+ state = states[idx];
+
+ ret = sbi_suspend(state) ? -1 : idx;
+
+ rcu_irq_enter_irqson();
+ if (s2idle)
+ dev_pm_genpd_resume(pd_dev);
+ else
+ pm_runtime_get_sync(pd_dev);
+ rcu_irq_exit_irqson();
+
+ cpu_pm_exit();
+
+ /* Clear the domain state to start fresh when back from idle. */
+ sbi_clear_domain_state();
+ return ret;
+}
+
+static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ return __sbi_enter_domain_idle_state(dev, drv, idx, false);
+}
+
+static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int idx)
+{
+ return __sbi_enter_domain_idle_state(dev, drv, idx, true);
+}
+
+static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
+{
+ struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
+
+ if (pd_dev)
+ pm_runtime_get_sync(pd_dev);
+
+ return 0;
+}
+
+static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
+{
+ struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
+
+ if (pd_dev) {
+ pm_runtime_put_sync(pd_dev);
+ /* Clear domain state to start fresh at next online. */
+ sbi_clear_domain_state();
+ }
+
+ return 0;
+}
+
+static void sbi_idle_init_cpuhp(void)
+{
+ int err;
+
+ if (!sbi_cpuidle_use_cpuhp)
+ return;
+
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+ "cpuidle/sbi:online",
+ sbi_cpuidle_cpuhp_up,
+ sbi_cpuidle_cpuhp_down);
+ if (err)
+ pr_warn("Failed %d while setting up cpuhp state\n", err);
+}
+
+static const struct of_device_id sbi_cpuidle_state_match[] = {
+ { .compatible = "riscv,idle-state",
+ .data = sbi_cpuidle_enter_state },
+ { },
+};
+
+static bool sbi_suspend_state_is_valid(u32 state)
+{
+ if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
+ state < SBI_HSM_SUSPEND_RET_PLATFORM)
+ return false;
+ if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
+ state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
+ return false;
+ return true;
+}
+
+static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+ int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
+
+ if (err) {
+ pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
+ return err;
+ }
+
+ if (!sbi_suspend_state_is_valid(*state)) {
+ pr_warn("Invalid SBI suspend state %#x\n", *state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
+ struct sbi_cpuidle_data *data,
+ unsigned int state_count, int cpu)
+{
+ /* Currently limit the hierarchical topology to be used in OSI mode. */
+ if (!sbi_cpuidle_use_osi)
+ return 0;
+
+ data->dev = dt_idle_attach_cpu(cpu, "sbi");
+ if (IS_ERR_OR_NULL(data->dev))
+ return PTR_ERR_OR_ZERO(data->dev);
+
+ /*
+ * Using the deepest state for the CPU to trigger a potential selection
+ * of a shared state for the domain, assumes the domain states are all
+ * deeper states.
+ */
+ drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
+ drv->states[state_count - 1].enter_s2idle =
+ sbi_enter_s2idle_domain_idle_state;
+ sbi_cpuidle_use_cpuhp = true;
+
+ return 0;
+}
+
+static int sbi_cpuidle_dt_init_states(struct device *dev,
+ struct cpuidle_driver *drv,
+ unsigned int cpu,
+ unsigned int state_count)
+{
+ struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+ struct device_node *state_node;
+ struct device_node *cpu_node;
+ u32 *states;
+ int i, ret;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ return -ENODEV;
+
+ states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
+ if (!states) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* Parse SBI specific details from state DT nodes */
+ for (i = 1; i < state_count; i++) {
+ state_node = of_get_cpu_state_node(cpu_node, i - 1);
+ if (!state_node)
+ break;
+
+ ret = sbi_dt_parse_state_node(state_node, &states[i]);
+ of_node_put(state_node);
+
+ if (ret)
+ goto fail;
+
+ pr_debug("sbi-state %#x index %d\n", states[i], i);
+ }
+ if (i != state_count) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ /* Initialize optional data, used for the hierarchical topology. */
+ ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
+ if (ret < 0)
+ goto fail;
+
+ /* Store states in the per-cpu struct. */
+ data->states = states;
+
+fail:
+ of_node_put(cpu_node);
+
+ return ret;
+}
+
+static void sbi_cpuidle_deinit_cpu(int cpu)
+{
+ struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+
+ dt_idle_detach_cpu(data->dev);
+ sbi_cpuidle_use_cpuhp = false;
+}
+
+static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
+{
+ struct cpuidle_driver *drv;
+ unsigned int state_count = 0;
+ int ret = 0;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ drv->name = "sbi_cpuidle";
+ drv->owner = THIS_MODULE;
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+ /* RISC-V architectural WFI to be represented as state index 0. */
+ drv->states[0].enter = sbi_cpuidle_enter_state;
+ drv->states[0].exit_latency = 1;
+ drv->states[0].target_residency = 1;
+ drv->states[0].power_usage = UINT_MAX;
+ strcpy(drv->states[0].name, "WFI");
+ strcpy(drv->states[0].desc, "RISC-V WFI");
+
+ /*
+ * If no DT idle states are detected (ret == 0), let the driver
+ * initialization fail accordingly, since there is no reason to
+ * initialize the idle driver if only WFI is supported: the
+ * default architectural back-end already executes WFI
+ * on idle entry.
+ */
+ ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
+ if (ret <= 0) {
+ pr_debug("HART%ld: failed to parse DT idle states\n",
+ cpuid_to_hartid_map(cpu));
+ return ret ? : -ENODEV;
+ }
+ state_count = ret + 1; /* Include WFI state as well */
+
+ /* Initialize idle states from DT. */
+ ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
+ if (ret) {
+ pr_err("HART%ld: failed to init idle states\n",
+ cpuid_to_hartid_map(cpu));
+ return ret;
+ }
+
+ ret = cpuidle_register(drv, NULL);
+ if (ret)
+ goto deinit;
+
+ cpuidle_cooling_register(drv);
+
+ return 0;
+deinit:
+ sbi_cpuidle_deinit_cpu(cpu);
+ return ret;
+}
+
+static void sbi_cpuidle_domain_sync_state(struct device *dev)
+{
+ /*
+ * All devices have now been attached/probed to the PM domain
+ * topology, hence it's fine to allow domain states to be picked.
+ */
+ sbi_cpuidle_pd_allow_domain_state = true;
+}
+
+#ifdef CONFIG_DT_IDLE_GENPD
+
+static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
+{
+ struct genpd_power_state *state = &pd->states[pd->state_idx];
+ u32 *pd_state;
+
+ if (!state->data)
+ return 0;
+
+ if (!sbi_cpuidle_pd_allow_domain_state)
+ return -EBUSY;
+
+ /* OSI mode is enabled, set the corresponding domain state. */
+ pd_state = state->data;
+ sbi_set_domain_state(*pd_state);
+
+ return 0;
+}
+
+struct sbi_pd_provider {
+ struct list_head link;
+ struct device_node *node;
+};
+
+static LIST_HEAD(sbi_pd_providers);
+
+static int sbi_pd_init(struct device_node *np)
+{
+ struct generic_pm_domain *pd;
+ struct sbi_pd_provider *pd_provider;
+ struct dev_power_governor *pd_gov;
+ int ret = -ENOMEM, state_count = 0;
+
+ pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
+ if (!pd)
+ goto out;
+
+ pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
+ if (!pd_provider)
+ goto free_pd;
+
+ pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
+
+ /* Allow power off when OSI is available. */
+ if (sbi_cpuidle_use_osi)
+ pd->power_off = sbi_cpuidle_pd_power_off;
+ else
+ pd->flags |= GENPD_FLAG_ALWAYS_ON;
+
+ /* Use governor for CPU PM domains if it has some states to manage. */
+ pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
+
+ ret = pm_genpd_init(pd, pd_gov, false);
+ if (ret)
+ goto free_pd_prov;
+
+ ret = of_genpd_add_provider_simple(np, pd);
+ if (ret)
+ goto remove_pd;
+
+ pd_provider->node = of_node_get(np);
+ list_add(&pd_provider->link, &sbi_pd_providers);
+
+ pr_debug("init PM domain %s\n", pd->name);
+ return 0;
+
+remove_pd:
+ pm_genpd_remove(pd);
+free_pd_prov:
+ kfree(pd_provider);
+free_pd:
+ dt_idle_pd_free(pd);
+out:
+ pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
+ return ret;
+}
+
+static void sbi_pd_remove(void)
+{
+ struct sbi_pd_provider *pd_provider, *it;
+ struct generic_pm_domain *genpd;
+
+ list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
+ of_genpd_del_provider(pd_provider->node);
+
+ genpd = of_genpd_remove_last(pd_provider->node);
+ if (!IS_ERR(genpd))
+ kfree(genpd);
+
+ of_node_put(pd_provider->node);
+ list_del(&pd_provider->link);
+ kfree(pd_provider);
+ }
+}
+
+static int sbi_genpd_probe(struct device_node *np)
+{
+ struct device_node *node;
+ int ret = 0, pd_count = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ /*
+ * Parse child nodes for the "#power-domain-cells" property and
+ * initialize a genpd/genpd-of-provider pair when it's found.
+ */
+ for_each_child_of_node(np, node) {
+ if (!of_find_property(node, "#power-domain-cells", NULL))
+ continue;
+
+ ret = sbi_pd_init(node);
+ if (ret)
+ goto put_node;
+
+ pd_count++;
+ }
+
+ /* Bail out if not using the hierarchical CPU topology. */
+ if (!pd_count)
+ goto no_pd;
+
+ /* Link genpd masters/subdomains to model the CPU topology. */
+ ret = dt_idle_pd_init_topology(np);
+ if (ret)
+ goto remove_pd;
+
+ return 0;
+
+put_node:
+ of_node_put(node);
+remove_pd:
+ sbi_pd_remove();
+ pr_err("failed to create CPU PM domains ret=%d\n", ret);
+no_pd:
+ return ret;
+}
+
+#else
+
+static inline int sbi_genpd_probe(struct device_node *np)
+{
+ return 0;
+}
+
+#endif
+
+static int sbi_cpuidle_probe(struct platform_device *pdev)
+{
+ int cpu, ret;
+ struct cpuidle_driver *drv;
+ struct cpuidle_device *dev;
+ struct device_node *np, *pds_node;
+
+ /* Detect OSI support based on CPU DT nodes */
+ sbi_cpuidle_use_osi = true;
+ for_each_possible_cpu(cpu) {
+ np = of_cpu_device_node_get(cpu);
+ if (!np ||
+ !of_find_property(np, "power-domains", NULL) ||
+ !of_find_property(np, "power-domain-names", NULL)) {
+ sbi_cpuidle_use_osi = false;
+ of_node_put(np);
+ break;
+ }
+ of_node_put(np);
+ }
+
+ /* Populate generic power domains from DT nodes */
+ pds_node = of_find_node_by_path("/cpus/power-domains");
+ if (pds_node) {
+ ret = sbi_genpd_probe(pds_node);
+ of_node_put(pds_node);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize CPU idle driver for each CPU */
+ for_each_possible_cpu(cpu) {
+ ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
+ if (ret) {
+ pr_debug("HART%ld: idle driver init failed\n",
+ cpuid_to_hartid_map(cpu));
+ goto out_fail;
+ }
+ }
+
+ /* Set up CPU hotplug notifiers */
+ sbi_idle_init_cpuhp();
+
+ pr_info("idle driver registered for all CPUs\n");
+
+ return 0;
+
+out_fail:
+ while (--cpu >= 0) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ drv = cpuidle_get_cpu_driver(dev);
+ cpuidle_unregister(drv);
+ sbi_cpuidle_deinit_cpu(cpu);
+ }
+
+ return ret;
+}
+
+static struct platform_driver sbi_cpuidle_driver = {
+ .probe = sbi_cpuidle_probe,
+ .driver = {
+ .name = "sbi-cpuidle",
+ .sync_state = sbi_cpuidle_domain_sync_state,
+ },
+};
+
+static int __init sbi_cpuidle_init(void)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ /*
+ * The SBI HSM suspend function is only available when:
+ * 1) SBI version is 0.3 or higher
+ * 2) SBI HSM extension is available
+ */
+ if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
+ sbi_probe_extension(SBI_EXT_HSM) <= 0) {
+ pr_info("HSM suspend not available\n");
+ return 0;
+ }
+
+ ret = platform_driver_register(&sbi_cpuidle_driver);
+ if (ret)
+ return ret;
+
+ pdev = platform_device_register_simple("sbi-cpuidle",
+ -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&sbi_cpuidle_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+device_initcall(sbi_cpuidle_init);
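A minimal sketch of the version gate used above, assuming the kernel's usual SBI version encoding (major in the high bits, minor in the low bits; the actual macro lives in asm/sbi.h and the exact shift/mask are an assumption here):

        /* Sketch (assumed encoding), not the real <asm/sbi.h> definition. */
        #define SBI_SPEC_VERSION_MAJOR_SHIFT 24
        #define sbi_mk_version(major, minor) \
                (((major) << SBI_SPEC_VERSION_MAJOR_SHIFT) | (minor))
        /* v0.3 -> 0x0000003, v1.0 -> 0x1000000; the driver requires >= v0.3. */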
diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c
new file mode 100644
index 000000000000..b37165514d4e
--- /dev/null
+++ b/drivers/cpuidle/dt_idle_genpd.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PM domains for CPUs via genpd.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "dt-idle-genpd: " fmt
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "dt_idle_genpd.h"
+
+static int pd_parse_state_nodes(
+ int (*parse_state)(struct device_node *, u32 *),
+ struct genpd_power_state *states, int state_count)
+{
+ int i, ret;
+ u32 state, *state_buf;
+
+ for (i = 0; i < state_count; i++) {
+ ret = parse_state(to_of_node(states[i].fwnode), &state);
+ if (ret)
+ goto free_state;
+
+ state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ if (!state_buf) {
+ ret = -ENOMEM;
+ goto free_state;
+ }
+ *state_buf = state;
+ states[i].data = state_buf;
+ }
+
+ return 0;
+
+free_state:
+ i--;
+ for (; i >= 0; i--)
+ kfree(states[i].data);
+ return ret;
+}
+
+static int pd_parse_states(struct device_node *np,
+ int (*parse_state)(struct device_node *, u32 *),
+ struct genpd_power_state **states,
+ int *state_count)
+{
+ int ret;
+
+ /* Parse the domain idle states. */
+ ret = of_genpd_parse_idle_states(np, states, state_count);
+ if (ret)
+ return ret;
+
+ /* Fill out the dt specifics for each found state. */
+ ret = pd_parse_state_nodes(parse_state, *states, *state_count);
+ if (ret)
+ kfree(*states);
+
+ return ret;
+}
+
+static void pd_free_states(struct genpd_power_state *states,
+ unsigned int state_count)
+{
+ int i;
+
+ for (i = 0; i < state_count; i++)
+ kfree(states[i].data);
+ kfree(states);
+}
+
+void dt_idle_pd_free(struct generic_pm_domain *pd)
+{
+ pd_free_states(pd->states, pd->state_count);
+ kfree(pd->name);
+ kfree(pd);
+}
+
+struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+ int (*parse_state)(struct device_node *, u32 *))
+{
+ struct generic_pm_domain *pd;
+ struct genpd_power_state *states = NULL;
+ int ret, state_count = 0;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ goto out;
+
+ pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
+ if (!pd->name)
+ goto free_pd;
+
+ /*
+ * Parse the domain idle states and let genpd manage the state selection
+ * for those being compatible with "domain-idle-state".
+ */
+ ret = pd_parse_states(np, parse_state, &states, &state_count);
+ if (ret)
+ goto free_name;
+
+ pd->free_states = pd_free_states;
+ pd->name = kbasename(pd->name);
+ pd->states = states;
+ pd->state_count = state_count;
+
+ pr_debug("alloc PM domain %s\n", pd->name);
+ return pd;
+
+free_name:
+ kfree(pd->name);
+free_pd:
+ kfree(pd);
+out:
+ pr_err("failed to alloc PM domain %pOF\n", np);
+ return NULL;
+}
+
+int dt_idle_pd_init_topology(struct device_node *np)
+{
+ struct device_node *node;
+ struct of_phandle_args child, parent;
+ int ret;
+
+ for_each_child_of_node(np, node) {
+ if (of_parse_phandle_with_args(node, "power-domains",
+ "#power-domain-cells", 0, &parent))
+ continue;
+
+ child.np = node;
+ child.args_count = 0;
+ ret = of_genpd_add_subdomain(&parent, &child);
+ of_node_put(parent.np);
+ if (ret) {
+ of_node_put(node);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+struct device *dt_idle_attach_cpu(int cpu, const char *name)
+{
+ struct device *dev;
+
+ dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), name);
+ if (IS_ERR_OR_NULL(dev))
+ return dev;
+
+ pm_runtime_irq_safe(dev);
+ if (cpu_online(cpu))
+ pm_runtime_get_sync(dev);
+
+ dev_pm_syscore_device(dev, true);
+
+ return dev;
+}
+
+void dt_idle_detach_cpu(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev))
+ return;
+
+ dev_pm_domain_detach(dev, false);
+}
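Taken together, the two helpers above are meant to bracket a cpuidle back-end's per-CPU setup and teardown. A minimal usage sketch (hypothetical driver; the domain name string depends on the DT binding):

        /* Sketch: per-CPU attach/detach for a genpd-aware idle back-end. */
        static struct device *my_pd_dev;

        static int my_idle_attach(int cpu)
        {
                my_pd_dev = dt_idle_attach_cpu(cpu, "sbi"); /* name per binding */
                return PTR_ERR_OR_ZERO(my_pd_dev);
        }

        static void my_idle_detach(void)
        {
                dt_idle_detach_cpu(my_pd_dev); /* tolerates NULL/ERR_PTR */
        }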
diff --git a/drivers/cpuidle/dt_idle_genpd.h b/drivers/cpuidle/dt_idle_genpd.h
new file mode 100644
index 000000000000..a95483d08a02
--- /dev/null
+++ b/drivers/cpuidle/dt_idle_genpd.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_IDLE_GENPD
+#define __DT_IDLE_GENPD
+
+struct device_node;
+struct generic_pm_domain;
+
+#ifdef CONFIG_DT_IDLE_GENPD
+
+void dt_idle_pd_free(struct generic_pm_domain *pd);
+
+struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+ int (*parse_state)(struct device_node *, u32 *));
+
+int dt_idle_pd_init_topology(struct device_node *np);
+
+struct device *dt_idle_attach_cpu(int cpu, const char *name);
+
+void dt_idle_detach_cpu(struct device *dev);
+
+#else
+
+static inline void dt_idle_pd_free(struct generic_pm_domain *pd)
+{
+}
+
+static inline struct generic_pm_domain *dt_idle_pd_alloc(
+ struct device_node *np,
+ int (*parse_state)(struct device_node *, u32 *))
+{
+ return NULL;
+}
+
+static inline int dt_idle_pd_init_topology(struct device_node *np)
+{
+ return 0;
+}
+
+static inline struct device *dt_idle_attach_cpu(int cpu, const char *name)
+{
+ return NULL;
+}
+
+static inline void dt_idle_detach_cpu(struct device *dev)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 69854fd2382a..416725c26e94 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -47,8 +47,9 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip,
{
struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
- /* Only clear the OE bit here, requires a RMW. Prevents potential issue
- * with OE and data getting to the physical pin at different times.
+ /*
+ * Only clear the OE bit here, requires a RMW. Prevents a potential issue
+ * with OE and DAT getting to the physical pin at different times.
*/
return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0);
}
@@ -60,9 +61,10 @@ static int ts4900_gpio_direction_output(struct gpio_chip *chip,
unsigned int reg;
int ret;
- /* If changing from an input to an output, we need to first set the
- * proper data bit to what is requested and then set OE bit. This
- * prevents a glitch that can occur on the IO line
+ /*
+ * If changing from an input to an output, we need to first set the
+ * GPIO's DAT bit to what is requested and then set the OE bit. This
+ * prevents a glitch that can occur on the IO line.
*/
regmap_read(priv->regmap, offset, &reg);
if (!(reg & TS4900_GPIO_OE)) {
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index b159e92a3612..8e03614c7a24 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -11,11 +11,11 @@
* Actually, the following platforms have DIO support:
*
* TS-5500:
- * Documentation: http://wiki.embeddedarm.com/wiki/TS-5500
+ * Documentation: https://docs.embeddedts.com/TS-5500
* Blocks: DIO1, DIO2 and LCD port.
*
* TS-5600:
- * Documentation: http://wiki.embeddedarm.com/wiki/TS-5600
+ * Documentation: https://docs.embeddedts.com/TS-5600
* Blocks: LCD port (identical to TS-5500 LCD).
*/
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 7a674873d794..a95a7cbc4a59 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -405,14 +405,25 @@ config HOLTEK_FF
Say Y here if you have a Holtek On Line Grip based game controller
and want to have force feedback support for it.
+config HID_VIVALDI_COMMON
+ tristate
+ help
+ ChromeOS Vivaldi HID parsing support library. This is a hidden
+ option so that drivers can use common code to parse the HID
+ descriptors for the vivaldi function row keymap.
+
config HID_GOOGLE_HAMMER
tristate "Google Hammer Keyboard"
+ select HID_VIVALDI_COMMON
+ select INPUT_VIVALDIFMAP
depends on USB_HID && LEDS_CLASS && CROS_EC
help
Say Y here if you have a Google Hammer device.
config HID_VIVALDI
tristate "Vivaldi Keyboard"
+ select HID_VIVALDI_COMMON
+ select INPUT_VIVALDIFMAP
depends on HID
help
Say Y here if you want to enable support for Vivaldi keyboards.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index d5ce8d747b14..345ac5581bd8 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_HID_FT260) += hid-ft260.o
obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o
obj-$(CONFIG_HID_GFRM) += hid-gfrm.o
obj-$(CONFIG_HID_GLORIOUS) += hid-glorious.o
+obj-$(CONFIG_HID_VIVALDI_COMMON) += hid-vivaldi-common.o
obj-$(CONFIG_HID_GOOGLE_HAMMER) += hid-google-hammer.o
obj-$(CONFIG_HID_VIVALDI) += hid-vivaldi.o
obj-$(CONFIG_HID_GT683R) += hid-gt683r.o
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index ddbe0de177e2..ff40f1e55c21 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -15,6 +15,7 @@
#include <linux/acpi.h>
#include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -25,6 +26,7 @@
#include <asm/unaligned.h>
#include "hid-ids.h"
+#include "hid-vivaldi-common.h"
/*
* C(hrome)B(ase)A(ttached)S(witch) - switch exported by Chrome EC and reporting
@@ -340,9 +342,9 @@ static int hammer_kbd_brightness_set_blocking(struct led_classdev *cdev,
static int hammer_register_leds(struct hid_device *hdev)
{
struct hammer_kbd_leds *kbd_backlight;
- int error;
- kbd_backlight = kzalloc(sizeof(*kbd_backlight), GFP_KERNEL);
+ kbd_backlight = devm_kzalloc(&hdev->dev, sizeof(*kbd_backlight),
+ GFP_KERNEL);
if (!kbd_backlight)
return -ENOMEM;
@@ -356,26 +358,7 @@ static int hammer_register_leds(struct hid_device *hdev)
/* Set backlight to 0% initially. */
hammer_kbd_brightness_set_blocking(&kbd_backlight->cdev, 0);
- error = led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
- if (error)
- goto err_free_mem;
-
- hid_set_drvdata(hdev, kbd_backlight);
- return 0;
-
-err_free_mem:
- kfree(kbd_backlight);
- return error;
-}
-
-static void hammer_unregister_leds(struct hid_device *hdev)
-{
- struct hammer_kbd_leds *kbd_backlight = hid_get_drvdata(hdev);
-
- if (kbd_backlight) {
- led_classdev_unregister(&kbd_backlight->cdev);
- kfree(kbd_backlight);
- }
+ return devm_led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
}
#define HID_UP_GOOGLEVENDOR 0xffd10000
@@ -512,11 +495,23 @@ out:
kfree(buf);
}
+static void hammer_stop(void *hdev)
+{
+ hid_hw_stop(hdev);
+}
+
static int hammer_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
+ struct vivaldi_data *vdata;
int error;
+ vdata = devm_kzalloc(&hdev->dev, sizeof(*vdata), GFP_KERNEL);
+ if (!vdata)
+ return -ENOMEM;
+
+ hid_set_drvdata(hdev, vdata);
+
error = hid_parse(hdev);
if (error)
return error;
@@ -525,6 +520,10 @@ static int hammer_probe(struct hid_device *hdev,
if (error)
return error;
+ error = devm_add_action(&hdev->dev, hammer_stop, hdev);
+ if (error)
+ return error;
+
/*
* We always want to poll for, and handle tablet mode events from
* devices that have folded usage, even when nobody has opened the input
@@ -577,15 +576,13 @@ static void hammer_remove(struct hid_device *hdev)
spin_unlock_irqrestore(&cbas_ec_lock, flags);
}
- hammer_unregister_leds(hdev);
-
- hid_hw_stop(hdev);
+ /* Unregistering LEDs and stopping the hardware is done via devm */
}
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
- { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ { HID_DEVICE(BUS_USB, HID_GROUP_VIVALDI,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
@@ -610,6 +607,8 @@ static struct hid_driver hammer_driver = {
.id_table = hammer_devices,
.probe = hammer_probe,
.remove = hammer_remove,
+ .feature_mapping = vivaldi_feature_mapping,
+ .input_configured = vivaldi_input_configured,
.input_mapping = hammer_input_mapping,
.event = hammer_event,
};
diff --git a/drivers/hid/hid-vivaldi-common.c b/drivers/hid/hid-vivaldi-common.c
new file mode 100644
index 000000000000..8b3e515d0f06
--- /dev/null
+++ b/drivers/hid/hid-vivaldi-common.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for ChromeOS HID Vivaldi keyboards
+ *
+ * Copyright (C) 2022 Google, Inc
+ */
+
+#include <linux/export.h>
+#include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "hid-vivaldi-common.h"
+
+#define MIN_FN_ROW_KEY 1
+#define MAX_FN_ROW_KEY VIVALDI_MAX_FUNCTION_ROW_KEYS
+#define HID_VD_FN_ROW_PHYSMAP 0x00000001
+#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
+
+/**
+ * vivaldi_feature_mapping - Fill out vivaldi keymap data exposed via HID
+ * @hdev: HID device to parse
+ * @field: HID field to parse
+ * @usage: HID usage to parse
+ *
+ * Note: this function assumes that driver data attached to @hdev contains an
+ * instance of &struct vivaldi_data at the very beginning.
+ */
+void vivaldi_feature_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct vivaldi_data *data = hid_get_drvdata(hdev);
+ struct hid_report *report = field->report;
+ u8 *report_data, *buf;
+ u32 report_len;
+ unsigned int fn_key;
+ int ret;
+
+ if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
+ (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
+ return;
+
+ fn_key = usage->hid & HID_USAGE;
+ if (fn_key < MIN_FN_ROW_KEY || fn_key > MAX_FN_ROW_KEY)
+ return;
+
+ if (fn_key > data->num_function_row_keys)
+ data->num_function_row_keys = fn_key;
+
+ report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL);
+ if (!report_data)
+ return;
+
+ report_len = hid_report_len(report);
+ if (!report->id) {
+ /*
+ * hid_hw_raw_request() will stuff report ID (which will be 0)
+ * into the first byte of the buffer even for unnumbered
+ * reports, so we need to account for this to avoid getting
+ * -EOVERFLOW in return.
+ * Note that hid_alloc_report_buf() adds 7 bytes to the size
+ * so we can safely say that we have space for an extra byte.
+ */
+ report_len++;
+ }
+
+ ret = hid_hw_raw_request(hdev, report->id, report_data,
+ report_len, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0) {
+ dev_warn(&hdev->dev, "failed to fetch feature %d\n",
+ field->report->id);
+ goto out;
+ }
+
+ if (!report->id) {
+ /*
+ * Undo the damage from hid_hw_raw_request() for unnumbered
+ * reports.
+ */
+ report_data++;
+ report_len--;
+ }
+
+ ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data,
+ report_len, 0);
+ if (ret) {
+ dev_warn(&hdev->dev, "failed to report feature %d\n",
+ field->report->id);
+ goto out;
+ }
+
+ data->function_row_physmap[fn_key - MIN_FN_ROW_KEY] =
+ field->value[usage->usage_index];
+
+out:
+ kfree(buf);
+}
+EXPORT_SYMBOL_GPL(vivaldi_feature_mapping);
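Because the helper reaches its state through hid_get_drvdata(), a driver reusing it must keep its struct vivaldi_data at offset zero of whatever it attaches as driver data. A minimal sketch (hypothetical struct name):

        /* Sketch: driver-data layout the vivaldi helpers rely on. */
        struct my_vivaldi_kbd {
                struct vivaldi_data vdata;      /* must remain the first member */
                int my_private_field;           /* driver-private state follows */
        };
        /* In probe: hid_set_drvdata(hdev, kbd); &kbd->vdata == (void *)kbd. */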
+
+static ssize_t function_row_physmap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct vivaldi_data *data = hid_get_drvdata(hdev);
+
+ return vivaldi_function_row_physmap_show(data, buf);
+}
+
+static DEVICE_ATTR_RO(function_row_physmap);
+static struct attribute *vivaldi_sysfs_attrs[] = {
+ &dev_attr_function_row_physmap.attr,
+ NULL
+};
+
+static const struct attribute_group vivaldi_attribute_group = {
+ .attrs = vivaldi_sysfs_attrs,
+};
+
+/**
+ * vivaldi_input_configured - Complete initialization of device using vivaldi map
+ * @hdev: HID device to which vivaldi attributes should be attached
+ * @hidinput: HID input device (unused)
+ */
+int vivaldi_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+{
+ struct vivaldi_data *data = hid_get_drvdata(hdev);
+
+ if (!data->num_function_row_keys)
+ return 0;
+
+ return devm_device_add_group(&hdev->dev, &vivaldi_attribute_group);
+}
+EXPORT_SYMBOL_GPL(vivaldi_input_configured);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-vivaldi-common.h b/drivers/hid/hid-vivaldi-common.h
new file mode 100644
index 000000000000..d42e82d77825
--- /dev/null
+++ b/drivers/hid/hid-vivaldi-common.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _HID_VIVALDI_COMMON_H
+#define _HID_VIVALDI_COMMON_H
+
+struct hid_device;
+struct hid_field;
+struct hid_input;
+struct hid_usage;
+
+void vivaldi_feature_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage);
+
+int vivaldi_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput);
+
+#endif /* _HID_VIVALDI_COMMON_H */
diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c
index 42ceb2058a09..3a979123e7d3 100644
--- a/drivers/hid/hid-vivaldi.c
+++ b/drivers/hid/hid-vivaldi.c
@@ -8,48 +8,11 @@
#include <linux/device.h>
#include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/sysfs.h>
-#define MIN_FN_ROW_KEY 1
-#define MAX_FN_ROW_KEY 24
-#define HID_VD_FN_ROW_PHYSMAP 0x00000001
-#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
-
-struct vivaldi_data {
- u32 function_row_physmap[MAX_FN_ROW_KEY - MIN_FN_ROW_KEY + 1];
- int max_function_row_key;
-};
-
-static ssize_t function_row_physmap_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hid_device *hdev = to_hid_device(dev);
- struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
- ssize_t size = 0;
- int i;
-
- if (!drvdata->max_function_row_key)
- return 0;
-
- for (i = 0; i < drvdata->max_function_row_key; i++)
- size += sprintf(buf + size, "%02X ",
- drvdata->function_row_physmap[i]);
- size += sprintf(buf + size, "\n");
- return size;
-}
-
-static DEVICE_ATTR_RO(function_row_physmap);
-static struct attribute *sysfs_attrs[] = {
- &dev_attr_function_row_physmap.attr,
- NULL
-};
-
-static const struct attribute_group input_attribute_group = {
- .attrs = sysfs_attrs
-};
+#include "hid-vivaldi-common.h"
static int vivaldi_probe(struct hid_device *hdev,
const struct hid_device_id *id)
@@ -70,86 +33,8 @@ static int vivaldi_probe(struct hid_device *hdev,
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}
-static void vivaldi_feature_mapping(struct hid_device *hdev,
- struct hid_field *field,
- struct hid_usage *usage)
-{
- struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
- struct hid_report *report = field->report;
- int fn_key;
- int ret;
- u32 report_len;
- u8 *report_data, *buf;
-
- if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
- (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
- return;
-
- fn_key = (usage->hid & HID_USAGE);
- if (fn_key < MIN_FN_ROW_KEY || fn_key > MAX_FN_ROW_KEY)
- return;
- if (fn_key > drvdata->max_function_row_key)
- drvdata->max_function_row_key = fn_key;
-
- report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL);
- if (!report_data)
- return;
-
- report_len = hid_report_len(report);
- if (!report->id) {
- /*
- * hid_hw_raw_request() will stuff report ID (which will be 0)
- * into the first byte of the buffer even for unnumbered
- * reports, so we need to account for this to avoid getting
- * -EOVERFLOW in return.
- * Note that hid_alloc_report_buf() adds 7 bytes to the size
- * so we can safely say that we have space for an extra byte.
- */
- report_len++;
- }
-
- ret = hid_hw_raw_request(hdev, report->id, report_data,
- report_len, HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret < 0) {
- dev_warn(&hdev->dev, "failed to fetch feature %d\n",
- field->report->id);
- goto out;
- }
-
- if (!report->id) {
- /*
- * Undo the damage from hid_hw_raw_request() for unnumbered
- * reports.
- */
- report_data++;
- report_len--;
- }
-
- ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data,
- report_len, 0);
- if (ret) {
- dev_warn(&hdev->dev, "failed to report feature %d\n",
- field->report->id);
- goto out;
- }
-
- drvdata->function_row_physmap[fn_key - MIN_FN_ROW_KEY] =
- field->value[usage->usage_index];
-
-out:
- kfree(buf);
-}
-
-static int vivaldi_input_configured(struct hid_device *hdev,
- struct hid_input *hidinput)
-{
- return devm_device_add_group(&hdev->dev, &input_attribute_group);
-}
-
static const struct hid_device_id vivaldi_table[] = {
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_VIVALDI, HID_ANY_ID,
- HID_ANY_ID) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_VIVALDI, HID_ANY_ID, HID_ANY_ID) },
{ }
};
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 5baebf62df33..e2752f7364bc 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -77,6 +77,13 @@ config INPUT_MATRIXKMAP
To compile this driver as a module, choose M here: the
module will be called matrix-keymap.
+config INPUT_VIVALDIFMAP
+ tristate
+ help
+ ChromeOS Vivaldi keymap support library. This is a hidden
+ option so that drivers can use common code to parse and
+ expose the vivaldi function row keymap.
+
comment "Userland interfaces"
config INPUT_MOUSEDEV
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 037cc595106c..2266c7d010ef 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -12,6 +12,7 @@ input-core-y += touchscreen.o
obj-$(CONFIG_INPUT_FF_MEMLESS) += ff-memless.o
obj-$(CONFIG_INPUT_SPARSEKMAP) += sparse-keymap.o
obj-$(CONFIG_INPUT_MATRIXKMAP) += matrix-keymap.o
+obj-$(CONFIG_INPUT_VIVALDIFMAP) += vivaldi-fmap.o
obj-$(CONFIG_INPUT_LEDS) += input-leds.o
obj-$(CONFIG_INPUT_MOUSEDEV) += mousedev.o
diff --git a/drivers/input/input.c b/drivers/input/input.c
index c3139bc2aa0d..e5a668ce884d 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);
static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
+static const unsigned int input_max_code[EV_CNT] = {
+ [EV_KEY] = KEY_MAX,
+ [EV_REL] = REL_MAX,
+ [EV_ABS] = ABS_MAX,
+ [EV_MSC] = MSC_MAX,
+ [EV_SW] = SW_MAX,
+ [EV_LED] = LED_MAX,
+ [EV_SND] = SND_MAX,
+ [EV_FF] = FF_MAX,
+};
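With this table in place, the range check added to input_set_capability() further down can reject out-of-range event codes instead of corrupting a neighbouring bitmap. For illustration:

        /*
         * Illustration: input_set_capability(dev, EV_KEY, KEY_MAX + 1)
         * now logs "input_set_capability: invalid code ..." with a stack
         * dump and returns without setting any capability bit.
         */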
+
static inline int is_event_supported(unsigned int code,
unsigned long *bm, unsigned int max)
{
@@ -511,6 +522,9 @@ void input_set_abs_params(struct input_dev *dev, unsigned int axis,
{
struct input_absinfo *absinfo;
+ __set_bit(EV_ABS, dev->evbit);
+ __set_bit(axis, dev->absbit);
+
input_alloc_absinfo(dev);
if (!dev->absinfo)
return;
@@ -520,12 +534,45 @@ void input_set_abs_params(struct input_dev *dev, unsigned int axis,
absinfo->maximum = max;
absinfo->fuzz = fuzz;
absinfo->flat = flat;
-
- __set_bit(EV_ABS, dev->evbit);
- __set_bit(axis, dev->absbit);
}
EXPORT_SYMBOL(input_set_abs_params);
+/**
+ * input_copy_abs - Copy absinfo from one input_dev to another
+ * @dst: Destination input device to copy the abs settings to
+ * @dst_axis: ABS_* value selecting the destination axis
+ * @src: Source input device to copy the abs settings from
+ * @src_axis: ABS_* value selecting the source axis
+ *
+ * Set absinfo for the selected destination axis by copying it from
+ * the specified source input device's source axis.
+ * This is useful to e.g. setup a pen/stylus input-device for combined
+ * touchscreen/pen hardware where the pen uses the same coordinates as
+ * the touchscreen.
+ */
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+ const struct input_dev *src, unsigned int src_axis)
+{
+ /* src must have EV_ABS and src_axis set */
+ if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
+ test_bit(src_axis, src->absbit))))
+ return;
+
+ /*
+ * input_alloc_absinfo() may have failed for the source. Our caller is
+ * expected to catch this when registering the input devices, which may
+ * happen after the input_copy_abs() call.
+ */
+ if (!src->absinfo)
+ return;
+
+ input_set_capability(dst, EV_ABS, dst_axis);
+ if (!dst->absinfo)
+ return;
+
+ dst->absinfo[dst_axis] = src->absinfo[src_axis];
+}
+EXPORT_SYMBOL(input_copy_abs);
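A short usage sketch of the new helper for the pen/touchscreen case mentioned in the kernel-doc (hypothetical function name):

        /* Sketch: give a pen input device the touchscreen's X/Y axis ranges. */
        static void my_setup_pen_axes(struct input_dev *pen,
                                      const struct input_dev *ts)
        {
                input_copy_abs(pen, ABS_X, ts, ABS_X);
                input_copy_abs(pen, ABS_Y, ts, ABS_Y);
                /* Register pen afterwards; a failed absinfo alloc surfaces there. */
        }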
/**
* input_grab_device - grabs device for exclusive use
@@ -2074,6 +2121,14 @@ EXPORT_SYMBOL(input_get_timestamp);
*/
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
+ if (type < EV_CNT && input_max_code[type] &&
+ code > input_max_code[type]) {
+ pr_err("%s: invalid code %u for type %u\n", __func__, code,
+ type);
+ dump_stack();
+ return;
+ }
+
switch (type) {
case EV_KEY:
__set_bit(code, dev->keybit);
@@ -2085,9 +2140,6 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
case EV_ABS:
input_alloc_absinfo(dev);
- if (!dev->absinfo)
- return;
-
__set_bit(code, dev->absbit);
break;
@@ -2285,12 +2337,6 @@ int input_register_device(struct input_dev *dev)
/* KEY_RESERVED is not supposed to be transmitted to userspace. */
__clear_bit(KEY_RESERVED, dev->keybit);
- /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
- if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
- __clear_bit(BTN_RIGHT, dev->keybit);
- __clear_bit(BTN_MIDDLE, dev->keybit);
- }
-
/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
input_cleanse_bitmasks(dev);
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 592c95b87f54..e10d57bf1180 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -123,7 +123,7 @@ static void adi_read_packet(struct adi_port *port)
{
struct adi *adi = port->adi;
struct gameport *gameport = port->gameport;
- unsigned char u, v, w, x, z;
+ unsigned char u, v, w, x;
int t[2], s[2], i;
unsigned long flags;
@@ -136,7 +136,7 @@ static void adi_read_packet(struct adi_port *port)
local_irq_save(flags);
gameport_trigger(gameport);
- v = z = gameport_read(gameport);
+ v = gameport_read(gameport);
do {
u = v;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 4c914f75a902..18190b529bca 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -131,7 +131,7 @@ static const struct xpad_device {
{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
- { 0x045e, 0x0b12, "Microsoft Xbox One X pad", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+ { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 9417ee0b1eff..4ea79db8f134 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -103,6 +103,7 @@ config KEYBOARD_ATKBD
select SERIO_LIBPS2
select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
select SERIO_GSCPS2 if GSC
+ select INPUT_VIVALDIFMAP
help
Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
you'll need this, unless you have a different type keyboard (USB, ADB
@@ -749,6 +750,7 @@ config KEYBOARD_XTKBD
config KEYBOARD_CROS_EC
tristate "ChromeOS EC keyboard"
select INPUT_MATRIXKMAP
+ select INPUT_VIVALDIFMAP
depends on CROS_EC
help
Say Y here to enable the matrix keyboard used by ChromeOS devices
@@ -779,6 +781,18 @@ config KEYBOARD_BCM
To compile this driver as a module, choose M here: the
module will be called bcm-keypad.
+config KEYBOARD_MT6779
+ tristate "MediaTek Keypad Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP_MMIO
+ select INPUT_MATRIXKMAP
+ help
+ Say Y here if you want to use the keypad on MediaTek SoCs.
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mt6779-keypad.
+
config KEYBOARD_MTK_PMIC
tristate "MediaTek PMIC keys support"
depends on MFD_MT6397
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index e3c8648f834e..721936e90290 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o
obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o
+obj-$(CONFIG_KEYBOARD_MT6779) += mt6779-keypad.o
obj-$(CONFIG_KEYBOARD_MTK_PMIC) += mtk-pmic-keys.o
obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index fbdef95291e9..d4131236d18c 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/input.h>
+#include <linux/input/vivaldi-fmap.h>
#include <linux/serio.h>
#include <linux/workqueue.h>
#include <linux/libps2.h>
@@ -64,8 +65,6 @@ static bool atkbd_terminal;
module_param_named(terminal, atkbd_terminal, bool, 0);
MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
-#define MAX_FUNCTION_ROW_KEYS 24
-
#define SCANCODE(keymap) ((keymap >> 16) & 0xFFFF)
#define KEYCODE(keymap) (keymap & 0xFFFF)
@@ -237,8 +236,7 @@ struct atkbd {
/* Serializes reconnect(), attr->set() and event work */
struct mutex mutex;
- u32 function_row_physmap[MAX_FUNCTION_ROW_KEYS];
- int num_function_row_keys;
+ struct vivaldi_data vdata;
};
/*
@@ -308,17 +306,7 @@ static struct attribute *atkbd_attributes[] = {
static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf)
{
- ssize_t size = 0;
- int i;
-
- if (!atkbd->num_function_row_keys)
- return 0;
-
- for (i = 0; i < atkbd->num_function_row_keys; i++)
- size += scnprintf(buf + size, PAGE_SIZE - size, "%02X ",
- atkbd->function_row_physmap[i]);
- size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
- return size;
+ return vivaldi_function_row_physmap_show(&atkbd->vdata, buf);
}
static umode_t atkbd_attr_is_visible(struct kobject *kobj,
@@ -329,7 +317,7 @@ static umode_t atkbd_attr_is_visible(struct kobject *kobj,
struct atkbd *atkbd = serio_get_drvdata(serio);
if (attr == &atkbd_attr_function_row_physmap.attr &&
- !atkbd->num_function_row_keys)
+ !atkbd->vdata.num_function_row_keys)
return 0;
return attr->mode;
@@ -1206,10 +1194,11 @@ static void atkbd_parse_fwnode_data(struct serio *serio)
/* Parse "function-row-physmap" property */
n = device_property_count_u32(dev, "function-row-physmap");
- if (n > 0 && n <= MAX_FUNCTION_ROW_KEYS &&
+ if (n > 0 && n <= VIVALDI_MAX_FUNCTION_ROW_KEYS &&
!device_property_read_u32_array(dev, "function-row-physmap",
- atkbd->function_row_physmap, n)) {
- atkbd->num_function_row_keys = n;
+ atkbd->vdata.function_row_physmap,
+ n)) {
+ atkbd->vdata.num_function_row_keys = n;
dev_dbg(dev, "FW reported %d function-row key locations\n", n);
}
}
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index fc02c540636e..6534dfca60b4 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -15,6 +15,7 @@
#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/input/vivaldi-fmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
@@ -27,8 +28,6 @@
#include <asm/unaligned.h>
-#define MAX_NUM_TOP_ROW_KEYS 15
-
/**
* struct cros_ec_keyb - Structure representing EC keyboard device
*
@@ -44,9 +43,7 @@
* @idev: The input device for the matrix keys.
* @bs_idev: The input device for non-matrix buttons and switches (or NULL).
* @notifier: interrupt event notifier for transport devices
- * @function_row_physmap: An array of the encoded rows/columns for the top
- * row function keys, in an order from left to right
- * @num_function_row_keys: The number of top row keys in a custom keyboard
+ * @vdata: vivaldi function row data
*/
struct cros_ec_keyb {
unsigned int rows;
@@ -64,8 +61,7 @@ struct cros_ec_keyb {
struct input_dev *bs_idev;
struct notifier_block notifier;
- u16 function_row_physmap[MAX_NUM_TOP_ROW_KEYS];
- size_t num_function_row_keys;
+ struct vivaldi_data vdata;
};
/**
@@ -537,9 +533,9 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
int err;
struct property *prop;
const __be32 *p;
- u16 *physmap;
+ u32 *physmap;
u32 key_pos;
- int row, col;
+ unsigned int row, col, scancode, n_physmap;
err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols);
if (err)
@@ -591,20 +587,21 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
ckdev->idev = idev;
cros_ec_keyb_compute_valid_keys(ckdev);
- physmap = ckdev->function_row_physmap;
+ physmap = ckdev->vdata.function_row_physmap;
+ n_physmap = 0;
of_property_for_each_u32(dev->of_node, "function-row-physmap",
prop, p, key_pos) {
- if (ckdev->num_function_row_keys == MAX_NUM_TOP_ROW_KEYS) {
+ if (n_physmap == VIVALDI_MAX_FUNCTION_ROW_KEYS) {
dev_warn(dev, "Only support up to %d top row keys\n",
- MAX_NUM_TOP_ROW_KEYS);
+ VIVALDI_MAX_FUNCTION_ROW_KEYS);
break;
}
row = KEY_ROW(key_pos);
col = KEY_COL(key_pos);
- *physmap = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
- physmap++;
- ckdev->num_function_row_keys++;
+ scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+ physmap[n_physmap++] = scancode;
}
+ ckdev->vdata.num_function_row_keys = n_physmap;
err = input_register_device(ckdev->idev);
if (err) {
@@ -619,18 +616,10 @@ static ssize_t function_row_physmap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- ssize_t size = 0;
- int i;
- struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
- u16 *physmap = ckdev->function_row_physmap;
-
- for (i = 0; i < ckdev->num_function_row_keys; i++)
- size += scnprintf(buf + size, PAGE_SIZE - size,
- "%s%02X", size ? " " : "", physmap[i]);
- if (size)
- size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+ const struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
+ const struct vivaldi_data *data = &ckdev->vdata;
- return size;
+ return vivaldi_function_row_physmap_show(data, buf);
}
static DEVICE_ATTR_RO(function_row_physmap);
@@ -648,7 +637,7 @@ static umode_t cros_ec_keyb_attr_is_visible(struct kobject *kobj,
struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
if (attr == &dev_attr_function_row_physmap.attr &&
- !ckdev->num_function_row_keys)
+ !ckdev->vdata.num_function_row_keys)
return 0;
return attr->mode;
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
new file mode 100644
index 000000000000..0dbbddc7f298
--- /dev/null
+++ b/drivers/input/keyboard/mt6779-keypad.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author Fengping Yu <fengping.yu@mediatek.com>
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MTK_KPD_NAME "mt6779-keypad"
+#define MTK_KPD_MEM 0x0004
+#define MTK_KPD_DEBOUNCE 0x0018
+#define MTK_KPD_DEBOUNCE_MASK GENMASK(13, 0)
+#define MTK_KPD_DEBOUNCE_MAX_MS 256
+#define MTK_KPD_NUM_MEMS 5
+#define MTK_KPD_NUM_BITS 136 /* 4 * 32 + 8: MEM5 only uses 8 bits */
+
+struct mt6779_keypad {
+ struct regmap *regmap;
+ struct input_dev *input_dev;
+ struct clk *clk;
+ void __iomem *base;
+ u32 n_rows;
+ u32 n_cols;
+ DECLARE_BITMAP(keymap_state, MTK_KPD_NUM_BITS);
+};
+
+static const struct regmap_config mt6779_keypad_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 36,
+};
+
+static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
+{
+ struct mt6779_keypad *keypad = dev_id;
+ const unsigned short *keycode = keypad->input_dev->keycode;
+ DECLARE_BITMAP(new_state, MTK_KPD_NUM_BITS);
+ DECLARE_BITMAP(change, MTK_KPD_NUM_BITS);
+ unsigned int bit_nr;
+ unsigned int row, col;
+ unsigned int scancode;
+ unsigned int row_shift = get_count_order(keypad->n_cols);
+ bool pressed;
+
+ regmap_bulk_read(keypad->regmap, MTK_KPD_MEM,
+ new_state, MTK_KPD_NUM_MEMS);
+
+ bitmap_xor(change, new_state, keypad->keymap_state, MTK_KPD_NUM_BITS);
+
+ for_each_set_bit(bit_nr, change, MTK_KPD_NUM_BITS) {
+ /*
+ * Registers are 32 bits, but only bits [15:0] are used to
+ * indicate key status.
+ */
+ if (bit_nr % 32 >= 16)
+ continue;
+
+ row = bit_nr / 32;
+ col = bit_nr % 32;
+ scancode = MATRIX_SCAN_CODE(row, col, row_shift);
+ /* 1: not pressed, 0: pressed */
+ pressed = !test_bit(bit_nr, new_state);
+ dev_dbg(&keypad->input_dev->dev, "%s",
+ pressed ? "pressed" : "released");
+
+ input_event(keypad->input_dev, EV_MSC, MSC_SCAN, scancode);
+ input_report_key(keypad->input_dev, keycode[scancode], pressed);
+ input_sync(keypad->input_dev);
+
+ dev_dbg(&keypad->input_dev->dev,
+ "report Linux keycode = %d\n", keycode[scancode]);
+ }
+
+ bitmap_copy(keypad->keymap_state, new_state, MTK_KPD_NUM_BITS);
+
+ return IRQ_HANDLED;
+}
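A worked example of the bit-to-scancode mapping in the handler above (values chosen for illustration):

        /*
         * Illustration: with n_cols = 8, row_shift = get_count_order(8) = 3.
         * A change on bit_nr = 37 decodes as row = 37 / 32 = 1 and
         * col = 37 % 32 = 5, so scancode = MATRIX_SCAN_CODE(1, 5, 3)
         * = (1 << 3) + 5 = 13, which indexes the keymap built by
         * matrix_keypad_build_keymap().
         */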
+
+static void mt6779_keypad_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
+{
+ struct mt6779_keypad *keypad;
+ int irq;
+ u32 debounce;
+ bool wakeup;
+ int error;
+
+ keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
+ if (!keypad)
+ return -ENOMEM;
+
+ keypad->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(keypad->base))
+ return PTR_ERR(keypad->base);
+
+ keypad->regmap = devm_regmap_init_mmio(&pdev->dev, keypad->base,
+ &mt6779_keypad_regmap_cfg);
+ if (IS_ERR(keypad->regmap)) {
+ dev_err(&pdev->dev,
+ "regmap init failed:%pe\n", keypad->regmap);
+ return PTR_ERR(keypad->regmap);
+ }
+
+ bitmap_fill(keypad->keymap_state, MTK_KPD_NUM_BITS);
+
+ keypad->input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!keypad->input_dev) {
+ dev_err(&pdev->dev, "Failed to allocate input dev\n");
+ return -ENOMEM;
+ }
+
+ keypad->input_dev->name = MTK_KPD_NAME;
+ keypad->input_dev->id.bustype = BUS_HOST;
+
+ error = matrix_keypad_parse_properties(&pdev->dev, &keypad->n_rows,
+ &keypad->n_cols);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to parse keypad params\n");
+ return error;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "debounce-delay-ms",
+ &debounce))
+ debounce = 16;
+
+ if (debounce > MTK_KPD_DEBOUNCE_MAX_MS) {
+ dev_err(&pdev->dev,
+ "Debounce time exceeds the maximum allowed time %dms\n",
+ MTK_KPD_DEBOUNCE_MAX_MS);
+ return -EINVAL;
+ }
+
+ wakeup = device_property_read_bool(&pdev->dev, "wakeup-source");
+
+ dev_dbg(&pdev->dev, "n_row=%d n_col=%d debounce=%d\n",
+ keypad->n_rows, keypad->n_cols, debounce);
+
+ error = matrix_keypad_build_keymap(NULL, NULL,
+ keypad->n_rows, keypad->n_cols,
+ NULL, keypad->input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to build keymap\n");
+ return error;
+ }
+
+ input_set_capability(keypad->input_dev, EV_MSC, MSC_SCAN);
+
+ regmap_write(keypad->regmap, MTK_KPD_DEBOUNCE,
+ (debounce * (1 << 5)) & MTK_KPD_DEBOUNCE_MASK);
+
+ keypad->clk = devm_clk_get(&pdev->dev, "kpd");
+ if (IS_ERR(keypad->clk))
+ return PTR_ERR(keypad->clk);
+
+ error = clk_prepare_enable(keypad->clk);
+ if (error) {
+ dev_err(&pdev->dev, "cannot prepare/enable keypad clock\n");
+ return error;
+ }
+
+ error = devm_add_action_or_reset(&pdev->dev, mt6779_keypad_clk_disable,
+ keypad->clk);
+ if (error)
+ return error;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ error = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, mt6779_keypad_irq_handler,
+ IRQF_ONESHOT, MTK_KPD_NAME, keypad);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to request IRQ#%d: %d\n",
+ irq, error);
+ return error;
+ }
+
+ error = input_register_device(keypad->input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to register device\n");
+ return error;
+ }
+
+ error = device_init_wakeup(&pdev->dev, wakeup);
+ if (error)
+ dev_warn(&pdev->dev, "device_init_wakeup() failed: %d\n",
+ error);
+
+ return 0;
+}
+
+static const struct of_device_id mt6779_keypad_of_match[] = {
+ { .compatible = "mediatek,mt6779-keypad" },
+ { .compatible = "mediatek,mt6873-keypad" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver mt6779_keypad_pdrv = {
+ .probe = mt6779_keypad_pdrv_probe,
+ .driver = {
+ .name = MTK_KPD_NAME,
+ .of_match_table = mt6779_keypad_of_match,
+ },
+};
+module_platform_driver(mt6779_keypad_pdrv);
+
+MODULE_AUTHOR("Mediatek Corporation");
+MODULE_DESCRIPTION("MTK Keypad (KPD) Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 62391d6c7da6..c31ab4368388 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>
#include <linux/module.h>
@@ -74,11 +75,22 @@ static const struct mtk_pmic_regs mt6323_regs = {
.pmic_rst_reg = MT6323_TOP_RST_MISC,
};
+static const struct mtk_pmic_regs mt6358_regs = {
+ .keys_regs[MTK_PMIC_PWRKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
+ 0x2, MT6358_PSC_TOP_INT_CON0, 0x5),
+ .keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
+ 0x8, MT6358_PSC_TOP_INT_CON0, 0xa),
+ .pmic_rst_reg = MT6358_TOP_RST_MISC,
+};
+
struct mtk_pmic_keys_info {
struct mtk_pmic_keys *keys;
const struct mtk_pmic_keys_regs *regs;
unsigned int keycode;
int irq;
+ int irq_r; /* optional: release irq if different */
bool wakeup:1;
};
@@ -188,6 +200,18 @@ static int mtk_pmic_key_setup(struct mtk_pmic_keys *keys,
return ret;
}
+ if (info->irq_r > 0) {
+ ret = devm_request_threaded_irq(keys->dev, info->irq_r, NULL,
+ mtk_pmic_keys_irq_handler_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ "mtk-pmic-keys", info);
+ if (ret) {
+ dev_err(keys->dev, "Failed to request IRQ_r: %d: %d\n",
+ info->irq, ret);
+ return ret;
+ }
+ }
+
input_set_capability(keys->input_dev, EV_KEY, info->keycode);
return 0;
@@ -199,8 +223,11 @@ static int __maybe_unused mtk_pmic_keys_suspend(struct device *dev)
int index;
for (index = 0; index < MTK_PMIC_MAX_KEY_COUNT; index++) {
- if (keys->keys[index].wakeup)
+ if (keys->keys[index].wakeup) {
enable_irq_wake(keys->keys[index].irq);
+ if (keys->keys[index].irq_r > 0)
+ enable_irq_wake(keys->keys[index].irq_r);
+ }
}
return 0;
@@ -212,8 +239,11 @@ static int __maybe_unused mtk_pmic_keys_resume(struct device *dev)
int index;
for (index = 0; index < MTK_PMIC_MAX_KEY_COUNT; index++) {
- if (keys->keys[index].wakeup)
+ if (keys->keys[index].wakeup) {
disable_irq_wake(keys->keys[index].irq);
+ if (keys->keys[index].irq_r > 0)
+ disable_irq_wake(keys->keys[index].irq_r);
+ }
}
return 0;
@@ -230,6 +260,9 @@ static const struct of_device_id of_mtk_pmic_keys_match_tbl[] = {
.compatible = "mediatek,mt6323-keys",
.data = &mt6323_regs,
}, {
+ .compatible = "mediatek,mt6358-keys",
+ .data = &mt6358_regs,
+ }, {
/* sentinel */
}
};
@@ -241,6 +274,8 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
unsigned int keycount;
struct mt6397_chip *pmic_chip = dev_get_drvdata(pdev->dev.parent);
struct device_node *node = pdev->dev.of_node, *child;
+ static const char *const irqnames[] = { "powerkey", "homekey" };
+ static const char *const irqnames_r[] = { "powerkey_r", "homekey_r" };
struct mtk_pmic_keys *keys;
const struct mtk_pmic_regs *mtk_pmic_regs;
struct input_dev *input_dev;
@@ -268,7 +303,8 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
input_dev->id.version = 0x0001;
keycount = of_get_available_child_count(node);
- if (keycount > MTK_PMIC_MAX_KEY_COUNT) {
+ if (keycount > MTK_PMIC_MAX_KEY_COUNT ||
+ keycount > ARRAY_SIZE(irqnames)) {
dev_err(keys->dev, "too many keys defined (%d)\n", keycount);
return -EINVAL;
}
@@ -276,12 +312,23 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
for_each_child_of_node(node, child) {
keys->keys[index].regs = &mtk_pmic_regs->keys_regs[index];
- keys->keys[index].irq = platform_get_irq(pdev, index);
+ keys->keys[index].irq =
+ platform_get_irq_byname(pdev, irqnames[index]);
if (keys->keys[index].irq < 0) {
of_node_put(child);
return keys->keys[index].irq;
}
+ if (of_device_is_compatible(node, "mediatek,mt6358-keys")) {
+ keys->keys[index].irq_r = platform_get_irq_byname(pdev,
+ irqnames_r[index]);
+
+ if (keys->keys[index].irq_r < 0) {
+ of_node_put(child);
+ return keys->keys[index].irq_r;
+ }
+ }
+
error = of_property_read_u32(child,
"linux,keycodes", &keys->keys[index].keycode);
if (error) {
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index 79851923ee57..b14a389600c9 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*/
+#include <linux/devm-helpers.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/input.h>
@@ -182,13 +183,6 @@ static irqreturn_t da9063_onkey_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void da9063_cancel_poll(void *data)
-{
- struct da9063_onkey *onkey = data;
-
- cancel_delayed_work_sync(&onkey->work);
-}
-
static int da9063_onkey_probe(struct platform_device *pdev)
{
struct da9063_onkey *onkey;
@@ -234,9 +228,8 @@ static int da9063_onkey_probe(struct platform_device *pdev)
input_set_capability(onkey->input, EV_KEY, KEY_POWER);
- INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
-
- error = devm_add_action(&pdev->dev, da9063_cancel_poll, onkey);
+ error = devm_delayed_work_autocancel(&pdev->dev, &onkey->work,
+ da9063_poll_on);
if (error) {
dev_err(&pdev->dev,
"Failed to add cancel poll action: %d\n",
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ffad142801b3..434d48ae4b12 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -186,6 +186,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN2044", /* L470 */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
+ "LEN2064", /* T14 Gen 1 AMD / P14s Gen 1 AMD */
"LEN2068", /* T14 Gen 1 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index 8970b49ea09a..9b02dd5dd2b9 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
+#include <linux/timekeeping.h>
#define DRIVER_NAME "ps2-gpio"
@@ -36,14 +37,37 @@
#define PS2_DATA_BIT7 8
#define PS2_PARITY_BIT 9
#define PS2_STOP_BIT 10
-#define PS2_TX_TIMEOUT 11
-#define PS2_ACK_BIT 12
+#define PS2_ACK_BIT 11
#define PS2_DEV_RET_ACK 0xfa
#define PS2_DEV_RET_NACK 0xfe
#define PS2_CMD_RESEND 0xfe
+/*
+ * The PS2 protocol specifies a clock frequency between 10kHz and 16.7kHz,
+ * therefore the maximum interrupt interval should be 100us and the minimum
+ * interrupt interval should be ~60us. Let's allow +/- 20us for frequency
+ * deviations and interrupt latency.
+ *
+ * The data line must be sampled ~30us to 50us after the falling edge,
+ * since the device updates the data line at the rising edge.
+ *
+ * ___ ______ ______ ______ ___
+ * \ / \ / \ / \ /
+ * \ / \ / \ / \ /
+ * \______/ \______/ \______/ \______/
+ *
+ * |-----------------| |--------|
+ * 60us/100us 30us/50us
+ */
+#define PS2_CLK_FREQ_MIN_HZ 10000
+#define PS2_CLK_FREQ_MAX_HZ 16700
+#define PS2_CLK_MIN_INTERVAL_US ((1000 * 1000) / PS2_CLK_FREQ_MAX_HZ)
+#define PS2_CLK_MAX_INTERVAL_US ((1000 * 1000) / PS2_CLK_FREQ_MIN_HZ)
+#define PS2_IRQ_MIN_INTERVAL_US (PS2_CLK_MIN_INTERVAL_US - 20)
+#define PS2_IRQ_MAX_INTERVAL_US (PS2_CLK_MAX_INTERVAL_US + 20)
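Plugging in the numbers (integer division, matching how the preprocessor arithmetic evaluates):

        /*
         * Worked values:
         *   PS2_CLK_MIN_INTERVAL_US = 1000000 / 16700 = 59 us
         *   PS2_CLK_MAX_INTERVAL_US = 1000000 / 10000 = 100 us
         *   PS2_IRQ_MIN_INTERVAL_US = 59 - 20  = 39 us
         *   PS2_IRQ_MAX_INTERVAL_US = 100 + 20 = 120 us
         */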
+
struct ps2_gpio_data {
struct device *dev;
struct serio *serio;
@@ -52,19 +76,30 @@ struct ps2_gpio_data {
struct gpio_desc *gpio_data;
bool write_enable;
int irq;
- unsigned char rx_cnt;
- unsigned char rx_byte;
- unsigned char tx_cnt;
- unsigned char tx_byte;
- struct completion tx_done;
- struct mutex tx_mutex;
- struct delayed_work tx_work;
+ ktime_t t_irq_now;
+ ktime_t t_irq_last;
+ struct {
+ unsigned char cnt;
+ unsigned char byte;
+ } rx;
+ struct {
+ unsigned char cnt;
+ unsigned char byte;
+ ktime_t t_xfer_start;
+ ktime_t t_xfer_end;
+ struct completion complete;
+ struct mutex mutex;
+ struct delayed_work work;
+ } tx;
};
static int ps2_gpio_open(struct serio *serio)
{
struct ps2_gpio_data *drvdata = serio->port_data;
+ drvdata->t_irq_last = 0;
+ drvdata->tx.t_xfer_end = 0;
+
enable_irq(drvdata->irq);
return 0;
}
@@ -73,7 +108,7 @@ static void ps2_gpio_close(struct serio *serio)
{
struct ps2_gpio_data *drvdata = serio->port_data;
- flush_delayed_work(&drvdata->tx_work);
+ flush_delayed_work(&drvdata->tx.work);
disable_irq(drvdata->irq);
}
@@ -85,9 +120,9 @@ static int __ps2_gpio_write(struct serio *serio, unsigned char val)
gpiod_direction_output(drvdata->gpio_clk, 0);
drvdata->mode = PS2_MODE_TX;
- drvdata->tx_byte = val;
+ drvdata->tx.byte = val;
- schedule_delayed_work(&drvdata->tx_work, usecs_to_jiffies(200));
+ schedule_delayed_work(&drvdata->tx.work, usecs_to_jiffies(200));
return 0;
}
@@ -98,12 +133,12 @@ static int ps2_gpio_write(struct serio *serio, unsigned char val)
int ret = 0;
if (in_task()) {
- mutex_lock(&drvdata->tx_mutex);
+ mutex_lock(&drvdata->tx.mutex);
__ps2_gpio_write(serio, val);
- if (!wait_for_completion_timeout(&drvdata->tx_done,
+ if (!wait_for_completion_timeout(&drvdata->tx.complete,
msecs_to_jiffies(10000)))
ret = SERIO_TIMEOUT;
- mutex_unlock(&drvdata->tx_mutex);
+ mutex_unlock(&drvdata->tx.mutex);
} else {
__ps2_gpio_write(serio, val);
}
@@ -115,9 +150,10 @@ static void ps2_gpio_tx_work_fn(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ps2_gpio_data *drvdata = container_of(dwork,
- struct ps2_gpio_data,
- tx_work);
+ struct ps2_gpio_data,
+ tx.work);
+ drvdata->tx.t_xfer_start = ktime_get();
enable_irq(drvdata->irq);
gpiod_direction_output(drvdata->gpio_data, 0);
gpiod_direction_input(drvdata->gpio_clk);
@@ -128,20 +164,31 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
unsigned char byte, cnt;
int data;
int rxflags = 0;
- static unsigned long old_jiffies;
+ s64 us_delta;
- byte = drvdata->rx_byte;
- cnt = drvdata->rx_cnt;
+ byte = drvdata->rx.byte;
+ cnt = drvdata->rx.cnt;
- if (old_jiffies == 0)
- old_jiffies = jiffies;
+ drvdata->t_irq_now = ktime_get();
+
+ /*
+ * We need to consider spurious interrupts happening right after
+ * a TX xfer finished.
+ */
+ us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->tx.t_xfer_end);
+ if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US))
+ goto end;
- if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) {
+ us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->t_irq_last);
+ if (us_delta > PS2_IRQ_MAX_INTERVAL_US && cnt) {
dev_err(drvdata->dev,
"RX: timeout, probably we missed an interrupt\n");
goto err;
+ } else if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US)) {
+ /* Ignore spurious IRQs. */
+ goto end;
}
- old_jiffies = jiffies;
+ drvdata->t_irq_last = drvdata->t_irq_now;
data = gpiod_get_value(drvdata->gpio_data);
if (unlikely(data < 0)) {
@@ -178,8 +225,16 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
if (!drvdata->write_enable)
goto err;
}
+ break;
+ case PS2_STOP_BIT:
+ /* stop bit should be high */
+ if (unlikely(!data)) {
+ dev_err(drvdata->dev, "RX: stop bit should be high\n");
+ goto err;
+ }
- /* Do not send spurious ACK's and NACK's when write fn is
+ /*
+ * Do not send spurious ACK's and NACK's when write fn is
* not provided.
*/
if (!drvdata->write_enable) {
@@ -189,23 +244,11 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
break;
}
- /* Let's send the data without waiting for the stop bit to be
- * sent. It may happen that we miss the stop bit. When this
- * happens we have no way to recover from this, certainly
- * missing the parity bit would be recognized when processing
- * the stop bit. When missing both, data is lost.
- */
serio_interrupt(drvdata->serio, byte, rxflags);
dev_dbg(drvdata->dev, "RX: sending byte 0x%x\n", byte);
- break;
- case PS2_STOP_BIT:
- /* stop bit should be high */
- if (unlikely(!data)) {
- dev_err(drvdata->dev, "RX: stop bit should be high\n");
- goto err;
- }
+
cnt = byte = 0;
- old_jiffies = 0;
+
goto end; /* success */
default:
dev_err(drvdata->dev, "RX: got out of sync with the device\n");
@@ -217,11 +260,10 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
err:
cnt = byte = 0;
- old_jiffies = 0;
__ps2_gpio_write(drvdata->serio, PS2_CMD_RESEND);
end:
- drvdata->rx_cnt = cnt;
- drvdata->rx_byte = byte;
+ drvdata->rx.cnt = cnt;
+ drvdata->rx.byte = byte;
return IRQ_HANDLED;
}
@@ -229,20 +271,34 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
{
unsigned char byte, cnt;
int data;
- static unsigned long old_jiffies;
+ s64 us_delta;
+
+ cnt = drvdata->tx.cnt;
+ byte = drvdata->tx.byte;
- cnt = drvdata->tx_cnt;
- byte = drvdata->tx_byte;
+ drvdata->t_irq_now = ktime_get();
- if (old_jiffies == 0)
- old_jiffies = jiffies;
+ /*
+ * There might be pending IRQs since we disabled IRQs in
+ * __ps2_gpio_write(). We can expect at least one clock period until
+ * the device generates the first falling edge after releasing the
+ * clock line.
+ */
+ us_delta = ktime_us_delta(drvdata->t_irq_now,
+ drvdata->tx.t_xfer_start);
+ if (unlikely(us_delta < PS2_CLK_MIN_INTERVAL_US))
+ goto end;
- if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) {
+ us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->t_irq_last);
+ if (us_delta > PS2_IRQ_MAX_INTERVAL_US && cnt > 1) {
dev_err(drvdata->dev,
"TX: timeout, probably we missed an interrupt\n");
goto err;
+ } else if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US)) {
+ /* Ignore spurious IRQs. */
+ goto end;
}
- old_jiffies = jiffies;
+ drvdata->t_irq_last = drvdata->t_irq_now;
switch (cnt) {
case PS2_START_BIT:
@@ -270,27 +326,22 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
/* release data line to generate stop bit */
gpiod_direction_input(drvdata->gpio_data);
break;
- case PS2_TX_TIMEOUT:
- /* Devices generate one extra clock pulse before sending the
- * acknowledgment.
- */
- break;
case PS2_ACK_BIT:
- gpiod_direction_input(drvdata->gpio_data);
data = gpiod_get_value(drvdata->gpio_data);
if (data) {
dev_warn(drvdata->dev, "TX: received NACK, retry\n");
goto err;
}
+ drvdata->tx.t_xfer_end = ktime_get();
drvdata->mode = PS2_MODE_RX;
- complete(&drvdata->tx_done);
+ complete(&drvdata->tx.complete);
cnt = 1;
- old_jiffies = 0;
goto end; /* success */
default:
- /* Probably we missed the stop bit. Therefore we release data
+ /*
+ * Probably we missed the stop bit. Therefore we release data
* line and try again.
*/
gpiod_direction_input(drvdata->gpio_data);
@@ -303,11 +354,10 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
err:
cnt = 1;
- old_jiffies = 0;
gpiod_direction_input(drvdata->gpio_data);
- __ps2_gpio_write(drvdata->serio, drvdata->tx_byte);
+ __ps2_gpio_write(drvdata->serio, drvdata->tx.byte);
end:
- drvdata->tx_cnt = cnt;
+ drvdata->tx.cnt = cnt;
return IRQ_HANDLED;
}
@@ -322,14 +372,19 @@ static irqreturn_t ps2_gpio_irq(int irq, void *dev_id)
static int ps2_gpio_get_props(struct device *dev,
struct ps2_gpio_data *drvdata)
{
- drvdata->gpio_data = devm_gpiod_get(dev, "data", GPIOD_IN);
+ enum gpiod_flags gflags;
+
+ /* Enforce open drain, since this is required by the PS/2 bus. */
+ gflags = GPIOD_IN | GPIOD_FLAGS_BIT_OPEN_DRAIN;
+
+ drvdata->gpio_data = devm_gpiod_get(dev, "data", gflags);
if (IS_ERR(drvdata->gpio_data)) {
dev_err(dev, "failed to request data gpio: %ld",
PTR_ERR(drvdata->gpio_data));
return PTR_ERR(drvdata->gpio_data);
}
- drvdata->gpio_clk = devm_gpiod_get(dev, "clk", GPIOD_IN);
+ drvdata->gpio_clk = devm_gpiod_get(dev, "clk", gflags);
if (IS_ERR(drvdata->gpio_clk)) {
dev_err(dev, "failed to request clock gpio: %ld",
PTR_ERR(drvdata->gpio_clk));
@@ -387,7 +442,8 @@ static int ps2_gpio_probe(struct platform_device *pdev)
serio->id.type = SERIO_8042;
serio->open = ps2_gpio_open;
serio->close = ps2_gpio_close;
- /* Write can be enabled in platform/dt data, but possibly it will not
+ /*
+ * Write can be enabled in platform/dt data, but possibly it will not
 * work because of the tight timing requirements.
*/
serio->write = drvdata->write_enable ? ps2_gpio_write : NULL;
@@ -400,14 +456,15 @@ static int ps2_gpio_probe(struct platform_device *pdev)
drvdata->dev = dev;
drvdata->mode = PS2_MODE_RX;
- /* Tx count always starts at 1, as the start bit is sent implicitly by
+ /*
+ * Tx count always starts at 1, as the start bit is sent implicitly by
* host-to-device communication initialization.
*/
- drvdata->tx_cnt = 1;
+ drvdata->tx.cnt = 1;
- INIT_DELAYED_WORK(&drvdata->tx_work, ps2_gpio_tx_work_fn);
- init_completion(&drvdata->tx_done);
- mutex_init(&drvdata->tx_mutex);
+ INIT_DELAYED_WORK(&drvdata->tx.work, ps2_gpio_tx_work_fn);
+ init_completion(&drvdata->tx.complete);
+ mutex_init(&drvdata->tx.mutex);
serio_register_port(serio);
platform_set_drvdata(pdev, drvdata);
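For reference, the interval check that the reworked RX and TX handlers share reduces to the following predicate (an editor's sketch under the driver's macros; the real handlers additionally treat an over-long interval as a hard error once a transfer is in flight):

static bool ps2_edge_interval_ok(ktime_t now, ktime_t last)
{
	s64 us = ktime_us_delta(now, last);

	return us >= PS2_IRQ_MIN_INTERVAL_US &&
	       us <= PS2_IRQ_MAX_INTERVAL_US;
}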
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index ff7794cecf69..43c7d6e5bdc0 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -638,6 +638,16 @@ config TOUCHSCREEN_MTOUCH
To compile this driver as a module, choose M here: the
module will be called mtouch.
+config TOUCHSCREEN_IMAGIS
+ tristate "Imagis touchscreen support"
+ depends on I2C
+ help
+ Say Y here if you have an Imagis IST30xxC touchscreen.
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imagis.
+
config TOUCHSCREEN_IMX6UL_TSC
tristate "Freescale i.MX6UL touchscreen controller"
depends on ((OF && GPIOLIB) || COMPILE_TEST) && HAS_IOMEM
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 39a8127cf6a5..557f84fd2075 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix_ts.o
obj-$(CONFIG_TOUCHSCREEN_HIDEEP) += hideep.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
obj-$(CONFIG_TOUCHSCREEN_ILITEK) += ilitek_ts_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_IMAGIS) += imagis.o
obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC) += imx6ul_tsc.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
obj-$(CONFIG_TOUCHSCREEN_IPROC) += bcm_iproc_tsc.o
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 752e8ba4fecb..3ad9870db108 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -298,32 +298,17 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return -ENOMSG;
}
-static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
+static int goodix_create_pen_input(struct goodix_ts_data *ts)
{
struct device *dev = &ts->client->dev;
struct input_dev *input;
input = devm_input_allocate_device(dev);
if (!input)
- return NULL;
-
- input_alloc_absinfo(input);
- if (!input->absinfo) {
- input_free_device(input);
- return NULL;
- }
-
- input->absinfo[ABS_X] = ts->input_dev->absinfo[ABS_MT_POSITION_X];
- input->absinfo[ABS_Y] = ts->input_dev->absinfo[ABS_MT_POSITION_Y];
- __set_bit(ABS_X, input->absbit);
- __set_bit(ABS_Y, input->absbit);
- input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
+ return -ENOMEM;
- input_set_capability(input, EV_KEY, BTN_TOUCH);
- input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
- input_set_capability(input, EV_KEY, BTN_STYLUS);
- input_set_capability(input, EV_KEY, BTN_STYLUS2);
- __set_bit(INPUT_PROP_DIRECT, input->propbit);
+ input_copy_abs(input, ABS_X, ts->input_dev, ABS_MT_POSITION_X);
+ input_copy_abs(input, ABS_Y, ts->input_dev, ABS_MT_POSITION_Y);
/*
 * The resolution of these touchscreens is about 10 units/mm; the actual
* resolution does not matter much since we set INPUT_PROP_DIRECT.
@@ -331,6 +316,13 @@ static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
*/
input_abs_set_res(input, ABS_X, 10);
input_abs_set_res(input, ABS_Y, 10);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
+
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_STYLUS);
+ input_set_capability(input, EV_KEY, BTN_STYLUS2);
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
input->name = "Goodix Active Pen";
input->phys = "input/pen";
@@ -340,25 +332,23 @@ static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
input->id.product = 0x1001;
input->id.version = ts->version;
- if (input_register_device(input) != 0) {
- input_free_device(input);
- return NULL;
- }
-
- return input;
+ ts->input_pen = input;
+ return 0;
}
static void goodix_ts_report_pen_down(struct goodix_ts_data *ts, u8 *data)
{
- int input_x, input_y, input_w;
+ int input_x, input_y, input_w, error;
u8 key_value;
- if (!ts->input_pen) {
- ts->input_pen = goodix_create_pen_input(ts);
- if (!ts->input_pen)
- return;
+ if (!ts->pen_input_registered) {
+ error = input_register_device(ts->input_pen);
+ ts->pen_input_registered = (error == 0) ? 1 : error;
}
+ if (ts->pen_input_registered < 0)
+ return;
+
if (ts->contact_size == 9) {
input_x = get_unaligned_le16(&data[4]);
input_y = get_unaligned_le16(&data[6]);
@@ -1215,6 +1205,17 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
return error;
}
+ /*
+ * Create the input_pen device before goodix_request_irq() calls
+ * devm_request_threaded_irq() so that the devm framework frees
+ * it after disabling the irq.
+ * Unfortunately there is no way to detect if the touchscreen has pen
+ * support, so registering the dev is delayed until the first pen event.
+ */
+ error = goodix_create_pen_input(ts);
+ if (error)
+ return error;
+
ts->irq_flags = goodix_irq_flags[ts->int_trigger_type] | IRQF_ONESHOT;
error = goodix_request_irq(ts);
if (error) {
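The new pen registration logic is easiest to read as a tri-state latch: pen_input_registered is 0 before the first attempt, 1 after a successful input_register_device(), and holds the negative error code if registration failed. A condensed editor's sketch (pen_input_ready() is a hypothetical helper, not in the driver):

static bool pen_input_ready(struct goodix_ts_data *ts)
{
	if (!ts->pen_input_registered)
		ts->pen_input_registered =
			input_register_device(ts->input_pen) ?: 1;

	return ts->pen_input_registered > 0;
}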
diff --git a/drivers/input/touchscreen/goodix.h b/drivers/input/touchscreen/goodix.h
index fa8602e78a64..87797cc88b32 100644
--- a/drivers/input/touchscreen/goodix.h
+++ b/drivers/input/touchscreen/goodix.h
@@ -94,6 +94,7 @@ struct goodix_ts_data {
u16 version;
bool reset_controller_at_probe;
bool load_cfg_from_disk;
+ int pen_input_registered;
struct completion firmware_loading_complete;
unsigned long irq_flags;
enum goodix_irq_pin_access_method irq_pin_access_method;
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
new file mode 100644
index 000000000000..e2697e6c6d2a
--- /dev/null
+++ b/drivers/input/touchscreen/imagis.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#define IST3038C_HIB_ACCESS (0x800B << 16)
+#define IST3038C_DIRECT_ACCESS BIT(31)
+#define IST3038C_REG_CHIPID 0x40001000
+#define IST3038C_REG_HIB_BASE 0x30000100
+#define IST3038C_REG_TOUCH_STATUS (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS)
+#define IST3038C_REG_TOUCH_COORD (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS | 0x8)
+#define IST3038C_REG_INTR_MESSAGE (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS | 0x4)
+#define IST3038C_WHOAMI 0x38c
+#define IST3038C_CHIP_ON_DELAY_MS 60
+#define IST3038C_I2C_RETRY_COUNT 3
+#define IST3038C_MAX_FINGER_NUM 10
+#define IST3038C_X_MASK GENMASK(23, 12)
+#define IST3038C_X_SHIFT 12
+#define IST3038C_Y_MASK GENMASK(11, 0)
+#define IST3038C_AREA_MASK GENMASK(27, 24)
+#define IST3038C_AREA_SHIFT 24
+#define IST3038C_FINGER_COUNT_MASK GENMASK(15, 12)
+#define IST3038C_FINGER_COUNT_SHIFT 12
+#define IST3038C_FINGER_STATUS_MASK GENMASK(9, 0)
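/*
 * Editor's sketch, not part of the patch: one packed coordinate word
 * decodes with the masks above as X in bits 23:12, Y in bits 11:0 and
 * the touch area in bits 27:24.
 */
static inline void ist3038c_unpack(u32 word, u32 *x, u32 *y, u32 *area)
{
	*x = (word & IST3038C_X_MASK) >> IST3038C_X_SHIFT;
	*y = word & IST3038C_Y_MASK;
	*area = (word & IST3038C_AREA_MASK) >> IST3038C_AREA_SHIFT;
}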
+
+struct imagis_ts {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ struct touchscreen_properties prop;
+ struct regulator_bulk_data supplies[2];
+};
+
+static int imagis_i2c_read_reg(struct imagis_ts *ts,
+ unsigned int reg, u32 *data)
+{
+ __be32 ret_be;
+ __be32 reg_be = cpu_to_be32(reg);
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = (unsigned char *)&reg_be,
+ .len = sizeof(reg_be),
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .buf = (unsigned char *)&ret_be,
+ .len = sizeof(ret_be),
+ },
+ };
+ int ret, error;
+ int retry = IST3038C_I2C_RETRY_COUNT;
+
+ /* Retry in case the controller fails to respond */
+ do {
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret == ARRAY_SIZE(msg)) {
+ *data = be32_to_cpu(ret_be);
+ return 0;
+ }
+
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "%s - i2c_transfer failed: %d (%d)\n",
+ __func__, error, ret);
+ } while (--retry);
+
+ return error;
+}
+
+static irqreturn_t imagis_interrupt(int irq, void *dev_id)
+{
+ struct imagis_ts *ts = dev_id;
+ u32 intr_message, finger_status;
+ unsigned int finger_count, finger_pressed;
+ int i;
+ int error;
+
+ error = imagis_i2c_read_reg(ts, IST3038C_REG_INTR_MESSAGE,
+ &intr_message);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "failed to read the interrupt message: %d\n", error);
+ goto out;
+ }
+
+ finger_count = (intr_message & IST3038C_FINGER_COUNT_MASK) >>
+ IST3038C_FINGER_COUNT_SHIFT;
+ if (finger_count > IST3038C_MAX_FINGER_NUM) {
+ dev_err(&ts->client->dev,
+ "finger count %d is more than maximum supported\n",
+ finger_count);
+ goto out;
+ }
+
+ finger_pressed = intr_message & IST3038C_FINGER_STATUS_MASK;
+
+ for (i = 0; i < finger_count; i++) {
+ error = imagis_i2c_read_reg(ts,
+ IST3038C_REG_TOUCH_COORD + (i * 4),
+ &finger_status);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "failed to read coordinates for finger %d: %d\n",
+ i, error);
+ goto out;
+ }
+
+ input_mt_slot(ts->input_dev, i);
+ input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
+ finger_pressed & BIT(i));
+ touchscreen_report_pos(ts->input_dev, &ts->prop,
+ (finger_status & IST3038C_X_MASK) >>
+ IST3038C_X_SHIFT,
+ finger_status & IST3038C_Y_MASK, 1);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+ (finger_status & IST3038C_AREA_MASK) >>
+ IST3038C_AREA_SHIFT);
+ }
+
+ input_mt_sync_frame(ts->input_dev);
+ input_sync(ts->input_dev);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static void imagis_power_off(void *_ts)
+{
+ struct imagis_ts *ts = _ts;
+
+ regulator_bulk_disable(ARRAY_SIZE(ts->supplies), ts->supplies);
+}
+
+static int imagis_power_on(struct imagis_ts *ts)
+{
+ int error;
+
+ error = regulator_bulk_enable(ARRAY_SIZE(ts->supplies), ts->supplies);
+ if (error)
+ return error;
+
+ msleep(IST3038C_CHIP_ON_DELAY_MS);
+
+ return 0;
+}
+
+static int imagis_start(struct imagis_ts *ts)
+{
+ int error;
+
+ error = imagis_power_on(ts);
+ if (error)
+ return error;
+
+ enable_irq(ts->client->irq);
+
+ return 0;
+}
+
+static int imagis_stop(struct imagis_ts *ts)
+{
+ disable_irq(ts->client->irq);
+
+ imagis_power_off(ts);
+
+ return 0;
+}
+
+static int imagis_input_open(struct input_dev *dev)
+{
+ struct imagis_ts *ts = input_get_drvdata(dev);
+
+ return imagis_start(ts);
+}
+
+static void imagis_input_close(struct input_dev *dev)
+{
+ struct imagis_ts *ts = input_get_drvdata(dev);
+
+ imagis_stop(ts);
+}
+
+static int imagis_init_input_dev(struct imagis_ts *ts)
+{
+ struct input_dev *input_dev;
+ int error;
+
+ input_dev = devm_input_allocate_device(&ts->client->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ ts->input_dev = input_dev;
+
+ input_dev->name = "Imagis capacitive touchscreen";
+ input_dev->phys = "input/ts";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->open = imagis_input_open;
+ input_dev->close = imagis_input_close;
+
+ input_set_drvdata(input_dev, ts);
+
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+ touchscreen_parse_properties(input_dev, true, &ts->prop);
+ if (!ts->prop.max_x || !ts->prop.max_y) {
+ dev_err(&ts->client->dev,
+ "Touchscreen-size-x and/or touchscreen-size-y not set in dts\n");
+ return -EINVAL;
+ }
+
+ error = input_mt_init_slots(input_dev,
+ IST3038C_MAX_FINGER_NUM,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "Failed to initialize MT slots: %d", error);
+ return error;
+ }
+
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "Failed to register input device: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int imagis_init_regulators(struct imagis_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+
+ ts->supplies[0].supply = "vdd";
+ ts->supplies[1].supply = "vddio";
+ return devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(ts->supplies),
+ ts->supplies);
+}
+
+static int imagis_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct imagis_ts *ts;
+ int chip_id, error;
+
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->client = i2c;
+
+ error = imagis_init_regulators(ts);
+ if (error) {
+ dev_err(dev, "regulator init error: %d\n", error);
+ return error;
+ }
+
+ error = imagis_power_on(ts);
+ if (error) {
+ dev_err(dev, "failed to enable regulators: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(dev, imagis_power_off, ts);
+ if (error) {
+ dev_err(dev, "failed to install poweroff action: %d\n", error);
+ return error;
+ }
+
+ error = imagis_i2c_read_reg(ts,
+ IST3038C_REG_CHIPID | IST3038C_DIRECT_ACCESS,
+ &chip_id);
+ if (error) {
+ dev_err(dev, "chip ID read failure: %d\n", error);
+ return error;
+ }
+
+ if (chip_id != IST3038C_WHOAMI) {
+ dev_err(dev, "unknown chip ID: 0x%x\n", chip_id);
+ return -EINVAL;
+ }
+
+ error = devm_request_threaded_irq(dev, i2c->irq,
+ NULL, imagis_interrupt,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ "imagis-touchscreen", ts);
+ if (error) {
+ dev_err(dev, "IRQ %d allocation failure: %d\n",
+ i2c->irq, error);
+ return error;
+ }
+
+ error = imagis_init_input_dev(ts);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int __maybe_unused imagis_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct imagis_ts *ts = i2c_get_clientdata(client);
+ int retval = 0;
+
+ mutex_lock(&ts->input_dev->mutex);
+
+ if (input_device_enabled(ts->input_dev))
+ retval = imagis_stop(ts);
+
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return retval;
+}
+
+static int __maybe_unused imagis_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct imagis_ts *ts = i2c_get_clientdata(client);
+ int retval = 0;
+
+ mutex_lock(&ts->input_dev->mutex);
+
+ if (input_device_enabled(ts->input_dev))
+ retval = imagis_start(ts);
+
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return retval;
+}
+
+static SIMPLE_DEV_PM_OPS(imagis_pm_ops, imagis_suspend, imagis_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id imagis_of_match[] = {
+ { .compatible = "imagis,ist3038c", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imagis_of_match);
+#endif
+
+static struct i2c_driver imagis_ts_driver = {
+ .driver = {
+ .name = "imagis-touchscreen",
+ .pm = &imagis_pm_ops,
+ .of_match_table = of_match_ptr(imagis_of_match),
+ },
+ .probe_new = imagis_probe,
+};
+
+module_i2c_driver(imagis_ts_driver);
+
+MODULE_DESCRIPTION("Imagis IST3038C Touchscreen Driver");
+MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
index b3fa71213d60..34c4cca57d13 100644
--- a/drivers/input/touchscreen/iqs5xx.c
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -486,11 +486,11 @@ static int iqs5xx_axis_init(struct i2c_client *client)
{
struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
struct touchscreen_properties *prop = &iqs5xx->prop;
- struct input_dev *input;
+ struct input_dev *input = iqs5xx->input;
u16 max_x, max_y;
int error;
- if (!iqs5xx->input) {
+ if (!input) {
input = devm_input_allocate_device(&client->dev);
if (!input)
return -ENOMEM;
@@ -512,11 +512,11 @@ static int iqs5xx_axis_init(struct i2c_client *client)
if (error)
return error;
- input_set_abs_params(iqs5xx->input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
- input_set_abs_params(iqs5xx->input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
- input_set_abs_params(iqs5xx->input, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
- touchscreen_parse_properties(iqs5xx->input, true, prop);
+ touchscreen_parse_properties(input, true, prop);
/*
* The device reserves 0xFFFF for coordinates that correspond to slots
@@ -540,7 +540,7 @@ static int iqs5xx_axis_init(struct i2c_client *client)
return error;
}
- error = input_mt_init_slots(iqs5xx->input, IQS5XX_NUM_CONTACTS,
+ error = input_mt_init_slots(input, IQS5XX_NUM_CONTACTS,
INPUT_MT_DIRECT);
if (error)
dev_err(&client->dev, "Failed to initialize slots: %d\n",
@@ -674,7 +674,7 @@ static irqreturn_t iqs5xx_irq(int irq, void *data)
input_mt_slot(input, i);
if (input_mt_report_slot_state(input, MT_TOOL_FINGER,
pressure != 0)) {
- touchscreen_report_pos(iqs5xx->input, &iqs5xx->prop,
+ touchscreen_report_pos(input, &iqs5xx->prop,
be16_to_cpu(touch_data->abs_x),
be16_to_cpu(touch_data->abs_y),
true);
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index bc11203c9cf7..72e0b767e1ba 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev)
err = pm_runtime_get_sync(&sdata->client->dev);
if (err < 0)
- return err;
+ goto out;
err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
if (err)
- return err;
+ goto out;
mutex_lock(&sdata->mutex);
sdata->running = true;
@@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev)
"failed to enable touchkey\n");
}
- return 0;
+out:
+ pm_runtime_put_noidle(&sdata->client->dev);
+ return err;
}
static void stmfts_input_close(struct input_dev *dev)
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 27810f6c69f6..72c7258b93a5 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -88,6 +88,8 @@ struct tsc200x {
int in_z1;
int in_z2;
+ struct touchscreen_properties prop;
+
spinlock_t lock;
struct timer_list penup_timer;
@@ -113,8 +115,7 @@ static void tsc200x_update_pen_state(struct tsc200x *ts,
int x, int y, int pressure)
{
if (pressure) {
- input_report_abs(ts->idev, ABS_X, x);
- input_report_abs(ts->idev, ABS_Y, y);
+ touchscreen_report_pos(ts->idev, &ts->prop, x, y, false);
input_report_abs(ts->idev, ABS_PRESSURE, pressure);
if (!ts->pen_down) {
input_report_key(ts->idev, BTN_TOUCH, !!pressure);
@@ -533,7 +534,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
input_set_abs_params(input_dev, ABS_PRESSURE,
0, MAX_12BIT, TSC200X_DEF_P_FUZZ, 0);
- touchscreen_parse_properties(input_dev, false, NULL);
+ touchscreen_parse_properties(input_dev, false, &ts->prop);
/* Ensure the touchscreen is off */
tsc200x_stop_scan(ts);
diff --git a/drivers/input/vivaldi-fmap.c b/drivers/input/vivaldi-fmap.c
new file mode 100644
index 000000000000..6dae83d96806
--- /dev/null
+++ b/drivers/input/vivaldi-fmap.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for ChromeOS Vivaldi keyboard function row mapping
+ *
+ * Copyright (C) 2022 Google, Inc
+ */
+
+#include <linux/export.h>
+#include <linux/input/vivaldi-fmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+/**
+ * vivaldi_function_row_physmap_show - Print vivaldi function row physmap attribute
+ * @data: The vivaldi function row map
+ * @buf: Buffer to print the function row physmap to
+ */
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+ char *buf)
+{
+ ssize_t size = 0;
+ int i;
+ const u32 *physmap = data->function_row_physmap;
+
+ if (!data->num_function_row_keys)
+ return 0;
+
+ for (i = 0; i < data->num_function_row_keys; i++)
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ "%s%02X", size ? " " : "", physmap[i]);
+ if (size)
+ size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(vivaldi_function_row_physmap_show);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 4081cb6cf7b3..4277853c7535 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -210,7 +210,7 @@ struct dm_table {
#define DM_TIO_MAGIC 28714
struct dm_target_io {
unsigned short magic;
- unsigned short flags;
+ blk_short_t flags;
unsigned int target_bio_nr;
struct dm_io *io;
struct dm_target *ti;
@@ -244,7 +244,7 @@ static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
#define DM_IO_MAGIC 19577
struct dm_io {
unsigned short magic;
- unsigned short flags;
+ blk_short_t flags;
atomic_t io_count;
struct mapped_device *md;
struct bio *orig_bio;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c58a5111cb57..ad2d5faa2ebb 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2472,9 +2472,11 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
sec &= ~(sector_t)(ic->sectors_per_block - 1);
}
+ if (unlikely(sec >= ic->provided_data_sectors)) {
+ journal_entry_set_unused(je);
+ continue;
+ }
}
- if (unlikely(sec >= ic->provided_data_sectors))
- continue;
get_area_and_offset(ic, sec, &area, &offset);
restore_last_bytes(ic, access_journal_data(ic, i, j), je);
for (k = j + 1; k < ic->journal_section_entries; k++) {
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 901abd6dea41..87310fceb0d8 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -891,15 +891,21 @@ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
struct hash_cell *hc = NULL;
if (*param->uuid) {
- if (*param->name || param->dev)
+ if (*param->name || param->dev) {
+ DMERR("Invalid ioctl structure: uuid %s, name %s, dev %llx",
+ param->uuid, param->name, (unsigned long long)param->dev);
return NULL;
+ }
hc = __get_uuid_cell(param->uuid);
if (!hc)
return NULL;
} else if (*param->name) {
- if (param->dev)
+ if (param->dev) {
+ DMERR("Invalid ioctl structure: name %s, dev %llx",
+ param->name, (unsigned long long)param->dev);
return NULL;
+ }
hc = __get_name_cell(param->name);
if (!hc)
@@ -1851,8 +1857,11 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
- if (param_kernel->data_size < minimum_data_size)
+ if (param_kernel->data_size < minimum_data_size) {
+ DMERR("Invalid data size in the ioctl structure: %u",
+ param_kernel->data_size);
return -EINVAL;
+ }
secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ad2e0bbeb559..3c5fad7c4ee6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -892,13 +892,19 @@ static void dm_io_complete(struct dm_io *io)
if (unlikely(wq_has_sleeper(&md->wait)))
wake_up(&md->wait);
- if (io_error == BLK_STS_DM_REQUEUE) {
- /*
- * Upper layer won't help us poll split bio, io->orig_bio
- * may only reflect a subset of the pre-split original,
- * so clear REQ_POLLED in case of requeue
- */
- bio->bi_opf &= ~REQ_POLLED;
+ if (io_error == BLK_STS_DM_REQUEUE || io_error == BLK_STS_AGAIN) {
+ if (bio->bi_opf & REQ_POLLED) {
+ /*
+ * Upper layer won't help us poll split bio (io->orig_bio
+ * may only reflect a subset of the pre-split original)
+ * so clear REQ_POLLED in case of requeue.
+ */
+ bio->bi_opf &= ~REQ_POLLED;
+ if (io_error == BLK_STS_AGAIN) {
+ /* io_uring doesn't handle BLK_STS_AGAIN (yet) */
+ queue_io(md, bio);
+ }
+ }
return;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 677fa4bf76d3..efb85c6d8e2d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1830,9 +1830,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
nvme_config_discard(disk, ns);
blk_queue_max_write_zeroes_sectors(disk->queue,
ns->ctrl->max_zeroes_sectors);
-
- set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
}
static inline bool nvme_first_scan(struct gendisk *disk)
@@ -1891,6 +1888,8 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
goto out_unfreeze;
}
+ set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
+ test_bit(NVME_NS_FORCE_RO, &ns->flags));
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
@@ -1903,6 +1902,9 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
+ set_disk_ro(ns->head->disk,
+ (id->nsattr & NVME_NS_ATTR_RO) ||
+ test_bit(NVME_NS_FORCE_RO, &ns->flags));
nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
@@ -3589,15 +3591,20 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
NULL,
};
-static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
+static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
unsigned nsid)
{
struct nvme_ns_head *h;
- lockdep_assert_held(&subsys->lock);
+ lockdep_assert_held(&ctrl->subsys->lock);
- list_for_each_entry(h, &subsys->nsheads, entry) {
- if (h->ns_id != nsid)
+ list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
+ /*
+ * Private namespaces can share NSIDs under some conditions.
+ * In that case we can't use the same ns_head for namespaces
+ * with the same NSID.
+ */
+ if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
continue;
if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
return h;
@@ -3791,7 +3798,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
}
mutex_lock(&ctrl->subsys->lock);
- head = nvme_find_ns_head(ctrl->subsys, nsid);
+ head = nvme_find_ns_head(ctrl, nsid);
if (!head) {
ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
if (ret) {
@@ -3988,6 +3995,16 @@ static void nvme_ns_remove(struct nvme_ns *ns)
set_capacity(ns->disk, 0);
nvme_fault_inject_fini(&ns->fault_inject);
+ /*
+ * Ensure that !NVME_NS_READY is seen by other threads to prevent
+ * this ns from going back into current_path.
+ */
+ synchronize_srcu(&ns->head->srcu);
+
+ /* wait for concurrent submissions */
+ if (nvme_mpath_clear_current_path(ns))
+ synchronize_srcu(&ns->head->srcu);
+
mutex_lock(&ns->ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
if (list_empty(&ns->head->list)) {
@@ -3999,10 +4016,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
/* guarantee not available in head->list */
synchronize_rcu();
- /* wait for concurrent submissions */
- if (nvme_mpath_clear_current_path(ns))
- synchronize_srcu(&ns->head->srcu);
-
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
@@ -4480,6 +4493,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
+ nvme_mpath_update(ctrl);
}
nvme_change_uevent(ctrl, "NVME_EVENT=connected");
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1b31f19e1053..d464fdf978fb 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -482,10 +482,11 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
/*
* Add a multipath node if the subsystems supports multiple controllers.
- * We also do this for private namespaces as the namespace sharing data could
- * change after a rescan.
+ * We also do this for private namespaces as the namespace sharing flag
+ * could change after a rescan.
*/
- if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
+ if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
+ !nvme_is_unique_nsid(ctrl, head) || !multipath)
return 0;
head->disk = blk_alloc_disk(ctrl->numa_node);
@@ -612,8 +613,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
-
- if (nvme_state_is_live(ns->ana_state))
+ /*
+ * nvme_mpath_set_live() will trigger I/O to the multipath path device
+ * and in turn to this path device. However we cannot accept this I/O
+ * if the controller is not live. This may deadlock if called from
+ * nvme_mpath_init_identify(), since the ctrl would never complete
+ * initialization and thus never unblock the I/O. For this case we
+ * will reprocess the ANA log page in nvme_mpath_update() once the
+ * controller is ready.
+ */
+ if (nvme_state_is_live(ns->ana_state) &&
+ ns->ctrl->state == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
@@ -700,6 +710,18 @@ static void nvme_ana_work(struct work_struct *work)
nvme_read_ana_log(ctrl);
}
+void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+ u32 nr_change_groups = 0;
+
+ if (!ctrl->ana_log_buf)
+ return;
+
+ mutex_lock(&ctrl->ana_lock);
+ nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
+ mutex_unlock(&ctrl->ana_lock);
+}
+
static void nvme_anatt_timeout(struct timer_list *t)
{
struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f4b674a8ce20..1393bbf82d71 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -723,6 +723,25 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
return queue_live;
return __nvme_check_ready(ctrl, rq, queue_live);
}
+
+/*
+ * An NSID shall be unique for all shared namespaces, or if at least one of
+ * the following conditions is met:
+ * 1. Namespace Management is supported by the controller
+ * 2. ANA is supported by the controller
+ * 3. NVM Sets are supported by the controller
+ *
+ * Otherwise, private namespaces are not required to report a unique NSID.
+ */
+static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head)
+{
+ return head->shared ||
+ (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
+ (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
+ (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
+}
+
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -782,6 +801,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -853,6 +873,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
}
+static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2e98ac3f3ad6..d817ca17463e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -45,7 +45,7 @@
#define NVME_MAX_SEGS 127
static int use_threaded_interrupts;
-module_param(use_threaded_interrupts, int, 0);
+module_param(use_threaded_interrupts, int, 0444);
static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
@@ -3467,7 +3467,10 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_128_BYTES_SQES |
NVME_QUIRK_SHARED_TAGS |
NVME_QUIRK_SKIP_CID_GEN },
-
+ { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 46d0dab686dd..397daaf51f1b 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -988,7 +988,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
mutex_unlock(&ctrl->lock);
- schedule_work(&ctrl->async_event_work);
+ queue_work(nvmet_wq, &ctrl->async_event_work);
}
void nvmet_execute_keep_alive(struct nvmet_req *req)
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 8fedd1e052fe..e44b2988759e 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1555,7 +1555,7 @@ static void nvmet_port_release(struct config_item *item)
struct nvmet_port *port = to_nvmet_port(item);
/* Let inflight controllers teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
list_del(&port->global_entry);
kfree(port->ana_state);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 64c2d2f3e25c..90e75324dae0 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
/*
* This read/write semaphore is used to synchronize access to configuration
* information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
list_add_tail(&aen->entry, &ctrl->async_events);
mutex_unlock(&ctrl->lock);
- schedule_work(&ctrl->async_event_work);
+ queue_work(nvmet_wq, &ctrl->async_event_work);
}
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
return;
}
@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1120,7 +1123,7 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
static inline bool nvmet_css_supported(u8 cc_css)
{
- switch (cc_css <<= NVME_CC_CSS_SHIFT) {
+ switch (cc_css << NVME_CC_CSS_SHIFT) {
case NVME_CC_CSS_NVM:
case NVME_CC_CSS_CSI:
return true;
@@ -1478,7 +1481,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
- schedule_work(&ctrl->fatal_err_work);
+ queue_work(nvmet_wq, &ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);
}
@@ -1619,9 +1622,15 @@ static int __init nvmet_init(void)
goto out_free_zbd_work_queue;
}
+ nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+ if (!nvmet_wq) {
+ error = -ENOMEM;
+ goto out_free_buffered_work_queue;
+ }
+
error = nvmet_init_discovery();
if (error)
- goto out_free_work_queue;
+ goto out_free_nvmet_work_queue;
error = nvmet_init_configfs();
if (error)
@@ -1630,7 +1639,9 @@ static int __init nvmet_init(void)
out_exit_discovery:
nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+ destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
destroy_workqueue(zbd_wq);
@@ -1642,6 +1653,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_configfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
+ destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index de90001fc5c4..ab2627e17bb9 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
continue;
assoc->hostport->invalid = 1;
noassoc = false;
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
iod->rqstdatalen = lsreqbuf_len;
iod->hosthandle = hosthandle;
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
return 0;
}
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 54606f1872b4..5c16372f3b53 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
- schedule_work(&rport->ls_work);
+ queue_work(nvmet_wq, &rport->ls_work);
return ret;
}
@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
- schedule_work(&rport->ls_work);
+ queue_work(nvmet_wq, &rport->ls_work);
}
return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
spin_unlock(&tport->lock);
- schedule_work(&tport->ls_work);
+ queue_work(nvmet_wq, &tport->ls_work);
return ret;
}
@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
spin_unlock(&tport->lock);
- schedule_work(&tport->ls_work);
+ queue_work(nvmet_wq, &tport->ls_work);
}
return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
tgt_rscn->tport = tgtport->private;
INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
- schedule_work(&tgt_rscn->work);
+ queue_work(nvmet_wq, &tgt_rscn->work);
}
static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
kref_init(&tfcp_req->ref);
- schedule_work(&tfcp_req->fcp_rcv_work);
+ queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
return 0;
}
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
- schedule_work(&tfcp_req->tio_done_work);
+ queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}
static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
if (abortio)
/* leave the reference while the work item is scheduled */
- WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+ WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
else {
/*
* as the io has already had the done callback made,
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 6485dc8eb974..f3d58abf11e0 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -283,7 +283,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -343,7 +343,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -373,7 +373,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 23f9d6f88804..59024af2da2e 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->req.transfer_len = blk_rq_payload_bytes(req);
}
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
return BLK_STS_OK;
}
@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
return;
}
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
}
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index d910c6aad4b6..69818752a33a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -366,6 +366,7 @@ struct nvmet_req {
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index a4de1e0d518b..5247c24538eb 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -283,7 +283,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
if (req->p.use_workqueue || effects) {
INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
req->p.rq = rq;
- schedule_work(&req->p.work);
+ queue_work(nvmet_wq, &req->p.work);
} else {
rq->end_io_data = req;
blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 2446d0918a41..2fab0b219b25 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1584,7 +1584,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1669,7 +1669,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
}
@@ -1699,7 +1699,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
/**
@@ -1773,7 +1773,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
if (!queue) {
struct nvmet_rdma_port *port = cm_id->context;
- schedule_delayed_work(&port->repair_work, 0);
+ queue_delayed_work(nvmet_wq, &port->repair_work, 0);
break;
}
fallthrough;
@@ -1903,7 +1903,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
nvmet_rdma_disable_port(port);
ret = nvmet_rdma_enable_port(port);
if (ret)
- schedule_delayed_work(&port->repair_work, 5 * HZ);
+ queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
}
static int nvmet_rdma_add_port(struct nvmet_port *nport)
@@ -2053,7 +2053,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
}
mutex_unlock(&nvmet_rdma_queue_mutex);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
static struct ib_client nvmet_rdma_ib_client = {
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 83ca577f72be..2793554e622e 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1269,7 +1269,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
spin_lock(&queue->state_lock);
if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
queue->state = NVMET_TCP_Q_DISCONNECTING;
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
spin_unlock(&queue->state_lock);
}
@@ -1684,7 +1684,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
goto out;
if (sk->sk_state == TCP_LISTEN)
- schedule_work(&port->accept_work);
+ queue_work(nvmet_wq, &port->accept_work);
out:
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -1815,7 +1815,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
if (sq->qid == 0) {
/* Let inflight controller teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
queue->nr_cmds = sq->size * 2;
@@ -1876,12 +1876,12 @@ static void __exit nvmet_tcp_exit(void)
nvmet_unregister_transport(&nvmet_tcp_ops);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
mutex_lock(&nvmet_tcp_queue_mutex);
list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
mutex_unlock(&nvmet_tcp_queue_mutex);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
destroy_workqueue(nvmet_tcp_wq);
}
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index df84d221e3de..558b35aba610 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -766,14 +766,6 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
return irqd->parent_data->hwirq;
}
-static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
- struct msi_desc *msi_desc)
-{
- msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) |
- msi_desc->msg.address_lo;
- msi_entry->data = msi_desc->msg.data;
-}
-
/*
* @nr_bm_irqs: Indicates the number of IRQs that were allocated from
* the bitmap.
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 5d4be9735d9d..6420ca129548 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -2,6 +2,7 @@
# tell define_trace.h where to find the cros ec trace header
CFLAGS_cros_ec_trace.o:= -I$(src)
+CFLAGS_cros_ec_sensorhub_ring.o:= -I$(src)
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PRIVACY_SCREEN) += chromeos_privacy_screen.o
@@ -21,7 +22,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
-cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o
+cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 272c89837d74..0dbceee87a4b 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -25,6 +25,9 @@
#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
+/* waitqueue for log readers */
+static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
+
/**
* struct cros_ec_debugfs - EC debugging information.
*
@@ -33,7 +36,6 @@
* @log_buffer: circular buffer for console log information
* @read_msg: preallocated EC command and buffer to read console log
* @log_mutex: mutex to protect circular buffer
- * @log_wq: waitqueue for log readers
* @log_poll_work: recurring task to poll EC for new console log data
* @panicinfo_blob: panicinfo debugfs blob
*/
@@ -44,7 +46,6 @@ struct cros_ec_debugfs {
struct circ_buf log_buffer;
struct cros_ec_command *read_msg;
struct mutex log_mutex;
- wait_queue_head_t log_wq;
struct delayed_work log_poll_work;
/* EC panicinfo */
struct debugfs_blob_wrapper panicinfo_blob;
@@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
buf_space--;
}
- wake_up(&debug_info->log_wq);
+ wake_up(&cros_ec_debugfs_log_wq);
}
mutex_unlock(&debug_info->log_mutex);
@@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
mutex_unlock(&debug_info->log_mutex);
- ret = wait_event_interruptible(debug_info->log_wq,
+ ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
if (ret < 0)
return ret;
@@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
struct cros_ec_debugfs *debug_info = file->private_data;
__poll_t mask = 0;
- poll_wait(file, &debug_info->log_wq, wait);
+ poll_wait(file, &cros_ec_debugfs_log_wq, wait);
mutex_lock(&debug_info->log_mutex);
if (CIRC_CNT(debug_info->log_buffer.head,
@@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
debug_info->log_buffer.tail = 0;
mutex_init(&debug_info->log_mutex);
- init_waitqueue_head(&debug_info->log_wq);
debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
debug_info, &cros_ec_console_log_fops);
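Moving the waitqueue to file scope is the classic shared-waitqueue pattern: the poll worker produces console bytes and wakes all readers, and each reader sleeps until its circular buffer is non-empty. A minimal editor's sketch with assumed names:

static DECLARE_WAIT_QUEUE_HEAD(log_wq);

static void log_producer_wake(void)
{
	wake_up(&log_wq);		/* new console bytes buffered */
}

static int log_consumer_wait(struct circ_buf *cb, int size)
{
	/* sleep until at least one byte is available */
	return wait_event_interruptible(log_wq,
					CIRC_CNT(cb->head, cb->tail, size));
}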
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
index 98e37080f760..71948dade0e2 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -17,7 +17,8 @@
#include <linux/sort.h>
#include <linux/slab.h>
-#include "cros_ec_trace.h"
+#define CREATE_TRACE_POINTS
+#include "cros_ec_sensorhub_trace.h"
/* Precision of fixed point for the m values from the filter */
#define M_PRECISION BIT(23)
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_trace.h b/drivers/platform/chrome/cros_ec_sensorhub_trace.h
new file mode 100644
index 000000000000..57d9b4785969
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sensorhub_trace.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events for the ChromeOS Sensorhub kernel module
+ *
+ * Copyright 2021 Google LLC.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cros_ec
+
+#if !defined(_CROS_EC_SENSORHUB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _CROS_EC_SENSORHUB_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cros_ec_sensorhub_timestamp,
+ TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
+ current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sample_timestamp)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sample_timestamp = ec_sample_timestamp;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sample_timestamp,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_data,
+ TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sensor_num)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sensor_num = ec_sensor_num;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sensor_num,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_filter,
+ TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
+ TP_ARGS(state, dx, dy),
+ TP_STRUCT__entry(
+ __field(s64, dx)
+ __field(s64, dy)
+ __field(s64, median_m)
+ __field(s64, median_error)
+ __field(s64, history_len)
+ __field(s64, x)
+ __field(s64, y)
+ ),
+ TP_fast_assign(
+ __entry->dx = dx;
+ __entry->dy = dy;
+ __entry->median_m = state->median_m;
+ __entry->median_error = state->median_error;
+ __entry->history_len = state->history_len;
+ __entry->x = state->x_offset;
+ __entry->y = state->y_offset;
+ ),
+ TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
+ __entry->dx,
+ __entry->dy,
+ __entry->median_m,
+ __entry->median_error,
+ __entry->history_len,
+ __entry->x,
+ __entry->y
+ )
+);
+
+#endif /* _CROS_EC_SENSORHUB_TRACE_H_ */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cros_ec_sensorhub_trace
+
+#include <trace/define_trace.h>
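Each TRACE_EVENT() above expands to a trace_<name>() inline with the TP_PROTO signature, which the ring code is expected to call at the points where timestamps are computed. A hedged usage sketch (variable names hypothetical):

	/* In cros_ec_sensorhub_ring.c, once per processed FIFO sample: */
	trace_cros_ec_sensorhub_timestamp(ec_sample_ts, ec_fifo_ts,
					  fifo_ts, current_ts, now);

Because TRACE_SYSTEM is cros_ec, the events surface under /sys/kernel/tracing/events/cros_ec/ and can be enabled per event by writing 1 to the corresponding enable file.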
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index 7e7cfc98657a..9bb5cd2c98b8 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
-#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/tracepoint.h>
@@ -71,100 +70,6 @@ TRACE_EVENT(cros_ec_request_done,
__entry->retval)
);
-TRACE_EVENT(cros_ec_sensorhub_timestamp,
- TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
- s64 current_timestamp, s64 current_time),
- TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
- current_time),
- TP_STRUCT__entry(
- __field(u32, ec_sample_timestamp)
- __field(u32, ec_fifo_timestamp)
- __field(s64, fifo_timestamp)
- __field(s64, current_timestamp)
- __field(s64, current_time)
- __field(s64, delta)
- ),
- TP_fast_assign(
- __entry->ec_sample_timestamp = ec_sample_timestamp;
- __entry->ec_fifo_timestamp = ec_fifo_timestamp;
- __entry->fifo_timestamp = fifo_timestamp;
- __entry->current_timestamp = current_timestamp;
- __entry->current_time = current_time;
- __entry->delta = current_timestamp - current_time;
- ),
- TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
- __entry->ec_sample_timestamp,
- __entry->ec_fifo_timestamp,
- __entry->fifo_timestamp,
- __entry->current_timestamp,
- __entry->current_time,
- __entry->delta
- )
-);
-
-TRACE_EVENT(cros_ec_sensorhub_data,
- TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
- s64 current_timestamp, s64 current_time),
- TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
- TP_STRUCT__entry(
- __field(u32, ec_sensor_num)
- __field(u32, ec_fifo_timestamp)
- __field(s64, fifo_timestamp)
- __field(s64, current_timestamp)
- __field(s64, current_time)
- __field(s64, delta)
- ),
- TP_fast_assign(
- __entry->ec_sensor_num = ec_sensor_num;
- __entry->ec_fifo_timestamp = ec_fifo_timestamp;
- __entry->fifo_timestamp = fifo_timestamp;
- __entry->current_timestamp = current_timestamp;
- __entry->current_time = current_time;
- __entry->delta = current_timestamp - current_time;
- ),
- TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
- __entry->ec_sensor_num,
- __entry->ec_fifo_timestamp,
- __entry->fifo_timestamp,
- __entry->current_timestamp,
- __entry->current_time,
- __entry->delta
- )
-);
-
-TRACE_EVENT(cros_ec_sensorhub_filter,
- TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
- TP_ARGS(state, dx, dy),
- TP_STRUCT__entry(
- __field(s64, dx)
- __field(s64, dy)
- __field(s64, median_m)
- __field(s64, median_error)
- __field(s64, history_len)
- __field(s64, x)
- __field(s64, y)
- ),
- TP_fast_assign(
- __entry->dx = dx;
- __entry->dy = dy;
- __entry->median_m = state->median_m;
- __entry->median_error = state->median_error;
- __entry->history_len = state->history_len;
- __entry->x = state->x_offset;
- __entry->y = state->y_offset;
- ),
- TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
- __entry->dx,
- __entry->dy,
- __entry->median_m,
- __entry->median_error,
- __entry->history_len,
- __entry->x,
- __entry->y
- )
-);
-
-
#endif /* _CROS_EC_TRACE_H_ */
/* this part must be outside header guard */
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 5de0bfb0bc4d..4bd2752c0823 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -115,17 +115,18 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
return ret;
cap->data = ret;
+ /* Try-power-role is optional. */
ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
if (ret) {
- dev_err(dev, "try-power-role not found: %d\n", ret);
- return ret;
+ dev_warn(dev, "try-power-role not found: %d\n", ret);
+ cap->prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ } else {
+ ret = typec_find_power_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->prefer_role = ret;
}
- ret = typec_find_power_role(buf);
- if (ret < 0)
- return ret;
- cap->prefer_role = ret;
-
cap->fwnode = fwnode;
return 0;
@@ -227,6 +228,7 @@ static void cros_typec_remove_partner(struct cros_typec_data *typec,
cros_typec_unregister_altmodes(typec, port_num, true);
cros_typec_usb_disconnect_state(port);
+ port->mux_flags = USB_PD_MUX_NONE;
typec_unregister_partner(port->partner);
port->partner = NULL;
@@ -512,20 +514,38 @@ static int cros_typec_enable_usb4(struct cros_typec_data *typec,
}
static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
- uint8_t mux_flags,
struct ec_response_usb_pd_control_v2 *pd_ctrl)
{
struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_response_usb_pd_mux_info resp;
+ struct ec_params_usb_pd_mux_info req = {
+ .port = port_num,
+ };
struct ec_params_usb_pd_mux_ack mux_ack;
enum typec_orientation orientation;
int ret;
- if (mux_flags == USB_PD_MUX_NONE) {
+ ret = cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(typec->dev, "Failed to get mux info for port: %d, err = %d\n",
+ port_num, ret);
+ return ret;
+ }
+
+ /* No change needs to be made, let's exit early. */
+ if (port->mux_flags == resp.flags && port->role == pd_ctrl->role)
+ return 0;
+
+ port->mux_flags = resp.flags;
+ port->role = pd_ctrl->role;
+
+ if (port->mux_flags == USB_PD_MUX_NONE) {
ret = cros_typec_usb_disconnect_state(port);
goto mux_ack;
}
- if (mux_flags & USB_PD_MUX_POLARITY_INVERTED)
+ if (port->mux_flags & USB_PD_MUX_POLARITY_INVERTED)
orientation = TYPEC_ORIENTATION_REVERSE;
else
orientation = TYPEC_ORIENTATION_NORMAL;
@@ -540,22 +560,22 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
if (ret)
return ret;
- if (mux_flags & USB_PD_MUX_USB4_ENABLED) {
+ if (port->mux_flags & USB_PD_MUX_USB4_ENABLED) {
ret = cros_typec_enable_usb4(typec, port_num, pd_ctrl);
- } else if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
+ } else if (port->mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl);
- } else if (mux_flags & USB_PD_MUX_DP_ENABLED) {
+ } else if (port->mux_flags & USB_PD_MUX_DP_ENABLED) {
ret = cros_typec_enable_dp(typec, port_num, pd_ctrl);
- } else if (mux_flags & USB_PD_MUX_SAFE_MODE) {
+ } else if (port->mux_flags & USB_PD_MUX_SAFE_MODE) {
ret = cros_typec_usb_safe_state(port);
- } else if (mux_flags & USB_PD_MUX_USB_ENABLED) {
+ } else if (port->mux_flags & USB_PD_MUX_USB_ENABLED) {
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
ret = typec_mux_set(port->mux, &port->state);
} else {
dev_dbg(typec->dev,
"Unrecognized mode requested, mux flags: %x\n",
- mux_flags);
+ port->mux_flags);
}
mux_ack:
@@ -630,17 +650,6 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
}
}
-static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
- struct ec_response_usb_pd_mux_info *resp)
-{
- struct ec_params_usb_pd_mux_info req = {
- .port = port_num,
- };
-
- return cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO, &req,
- sizeof(req), resp, sizeof(*resp));
-}
-
/*
* Helper function to register partner/plug altmodes.
*/
@@ -938,7 +947,6 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
{
struct ec_params_usb_pd_control req;
struct ec_response_usb_pd_control_v2 resp;
- struct ec_response_usb_pd_mux_info mux_resp;
int ret;
if (port_num < 0 || port_num >= typec->num_ports) {
@@ -958,6 +966,11 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
if (ret < 0)
return ret;
+ /* Update the switches if they exist, according to requested state */
+ ret = cros_typec_configure_mux(typec, port_num, &resp);
+ if (ret)
+ dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
+
dev_dbg(typec->dev, "Enabled %d: 0x%hhx\n", port_num, resp.enabled);
dev_dbg(typec->dev, "Role %d: 0x%hhx\n", port_num, resp.role);
dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity);
@@ -973,27 +986,7 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
if (typec->typec_cmd_supported)
cros_typec_handle_status(typec, port_num);
- /* Update the switches if they exist, according to requested state */
- ret = cros_typec_get_mux_info(typec, port_num, &mux_resp);
- if (ret < 0) {
- dev_warn(typec->dev,
- "Failed to get mux info for port: %d, err = %d\n",
- port_num, ret);
- return 0;
- }
-
- /* No change needs to be made, let's exit early. */
- if (typec->ports[port_num]->mux_flags == mux_resp.flags &&
- typec->ports[port_num]->role == resp.role)
- return 0;
-
- typec->ports[port_num]->mux_flags = mux_resp.flags;
- typec->ports[port_num]->role = resp.role;
- ret = cros_typec_configure_mux(typec, port_num, mux_resp.flags, &resp);
- if (ret)
- dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
-
- return ret;
+ return 0;
}
static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
@@ -1075,7 +1068,13 @@ static int cros_typec_probe(struct platform_device *pdev)
return -ENOMEM;
typec->dev = dev;
+
typec->ec = dev_get_drvdata(pdev->dev.parent);
+ if (!typec->ec) {
+ dev_err(dev, "couldn't find parent EC device\n");
+ return -ENODEV;
+ }
+
platform_set_drvdata(pdev, typec);
ret = cros_typec_get_cmd_version(typec);
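cros_typec_configure_mux() now fetches the mux state itself and bails out when neither the cached mux flags nor the port role changed, so the EC muxes are only reprogrammed on real transitions. The shape of that guard, reduced to a standalone sketch (types and names hypothetical):

struct example_port_state {
	u8 mux_flags;
	u8 role;
};

/* Returns true when hardware reconfiguration can be skipped. */
static bool example_state_unchanged(struct example_port_state *cached,
				    const struct example_port_state *fresh)
{
	if (cached->mux_flags == fresh->mux_flags &&
	    cached->role == fresh->role)
		return true;

	*cached = *fresh;	/* remember the new state for next time */
	return false;
}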
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f6d6d4c26361..41c65b4d2baf 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1293,6 +1293,16 @@ config RTC_DRV_OPAL
This driver can also be built as a module. If so, the module
will be called rtc-opal.
+config RTC_DRV_OPTEE
+ tristate "OP-TEE based RTC driver"
+ depends on OPTEE
+ help
+ Select this to get support for OP-TEE based RTC control on SoCs where
+ the RTC is not accessible to the normal world (Linux).
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-optee.
+
config RTC_DRV_ZYNQMP
tristate "Xilinx Zynq Ultrascale+ MPSoC RTC"
depends on OF && HAS_IOMEM
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index e92f3e943245..2d827d8261d5 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -115,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_GAMECUBE) += rtc-gamecube.o
obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
+obj-$(CONFIG_RTC_DRV_OPTEE) += rtc-optee.o
obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 4b460c61f1d8..3c8eec2218df 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -26,6 +26,15 @@ struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
+ struct timerqueue_head *head = &rtc->timerqueue;
+ struct timerqueue_node *node;
+
+ mutex_lock(&rtc->ops_lock);
+ while ((node = timerqueue_getnext(head)))
+ timerqueue_del(head, node);
+ mutex_unlock(&rtc->ops_lock);
+
+ cancel_work_sync(&rtc->irqwork);
ida_simple_remove(&rtc_ida, rtc->id);
mutex_destroy(&rtc->ops_lock);
@@ -390,9 +399,6 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc)
if (!rtc->ops->set_alarm)
clear_bit(RTC_FEATURE_ALARM, rtc->features);
- if (rtc->uie_unsupported)
- clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
-
if (rtc->ops->set_offset)
set_bit(RTC_FEATURE_CORRECTION, rtc->features);
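rtc_device_release() now flushes any still-queued timers before the device memory is freed. The drain itself is plain timerqueue API usage; a standalone sketch (the caller must hold whatever lock protects the queue, ops_lock above):

#include <linux/timerqueue.h>

static void example_drain_timerqueue(struct timerqueue_head *head)
{
	struct timerqueue_node *node;

	/* timerqueue_getnext() returns the earliest node, or NULL. */
	while ((node = timerqueue_getnext(head)))
		timerqueue_del(head, node);
}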
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index d8e835798153..9edd662c69ac 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -804,9 +804,13 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
struct rtc_time tm;
ktime_t now;
+ int err;
+
+ err = __rtc_read_time(rtc, &tm);
+ if (err)
+ return err;
timer->enabled = 1;
- __rtc_read_time(rtc, &tm);
now = rtc_tm_to_ktime(tm);
/* Skip over expired timers */
@@ -820,7 +824,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
trace_rtc_timer_enqueue(timer);
if (!next || ktime_before(timer->node.expires, next->expires)) {
struct rtc_wkalrm alarm;
- int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);
alarm.enabled = 1;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 336cb9aa5e33..d51565bcc189 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -1955,7 +1955,7 @@ static int ds1307_probe(struct i2c_client *client,
dev_info(ds1307->dev,
"'wakeup-source' is set, request for an IRQ is disabled!\n");
/* We cannot support UIE mode if we do not have an IRQ line */
- ds1307->rtc->uie_unsupported = 1;
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, ds1307->rtc->features);
}
if (want_irq) {
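With the core's uie_unsupported translation gone (see the class.c hunk above), each driver now flips the feature bits directly; the ds1307 change here and the RTC driver conversions below all follow the same pattern. In sketch form, for a freshly allocated device:

	rtc = devm_rtc_allocate_device(dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	/* No once-per-second update interrupts on this hardware. */
	clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
	/* Where applicable: the alarm only has minute resolution. */
	set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);

	return devm_rtc_register_device(rtc);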
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 75db7ab654a5..a24331ba8a5f 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -1273,7 +1273,7 @@ ds1685_rtc_probe(struct platform_device *pdev)
/* See if the platform doesn't support UIE. */
if (pdata->uie_unsupported)
- rtc_dev->uie_unsupported = 1;
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc_dev->features);
rtc->dev = rtc_dev;
@@ -1285,13 +1285,10 @@ ds1685_rtc_probe(struct platform_device *pdev)
* there won't be an automatic way of notifying the kernel about it,
* unless ctrlc is explicitly polled.
*/
- if (!pdata->no_irq) {
- ret = platform_get_irq(pdev, 0);
- if (ret <= 0)
- return ret;
-
- rtc->irq_num = ret;
-
+ rtc->irq_num = platform_get_irq(pdev, 0);
+ if (rtc->irq_num <= 0) {
+ clear_bit(RTC_FEATURE_ALARM, rtc_dev->features);
+ } else {
/* Request an IRQ. */
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq_num,
NULL, ds1685_rtc_irq_handler,
@@ -1305,7 +1302,6 @@ ds1685_rtc_probe(struct platform_device *pdev)
rtc->irq_num = 0;
}
}
- rtc->no_irq = pdata->no_irq;
/* Setup complete. */
ds1685_rtc_switch_to_bank0(rtc);
@@ -1394,7 +1390,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
* have been taken care of by the shutdown scripts and this
* is the final function call.
*/
- if (!rtc->no_irq)
+ if (rtc->irq_num)
disable_irq_nosync(rtc->irq_num);
/* Oscillator must be on and the countdown chain enabled. */
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index 138c5e0046c8..11850c2880ad 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -261,15 +261,17 @@ static int __init efi_rtc_probe(struct platform_device *dev)
if (efi.get_time(&eft, &cap) != EFI_SUCCESS)
return -ENODEV;
- rtc = devm_rtc_device_register(&dev->dev, "rtc-efi", &efi_rtc_ops,
- THIS_MODULE);
+ rtc = devm_rtc_allocate_device(&dev->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- rtc->uie_unsupported = 1;
platform_set_drvdata(dev, rtc);
- return 0;
+ rtc->ops = &efi_rtc_ops;
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
+ set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
+
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver efi_rtc_driver = {
diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c
index f717b36f4738..18ca3b38b2d0 100644
--- a/drivers/rtc/rtc-gamecube.c
+++ b/drivers/rtc/rtc-gamecube.c
@@ -235,6 +235,7 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d)
}
ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
if (ret) {
pr_err("no io memory range found\n");
return -1;
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 0751cae27285..90e602e99d03 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -220,24 +220,6 @@ static int hym8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
u8 buf[4];
int ret;
- /*
- * The alarm has no seconds so deal with it
- */
- if (alm_tm->tm_sec) {
- alm_tm->tm_sec = 0;
- alm_tm->tm_min++;
- if (alm_tm->tm_min >= 60) {
- alm_tm->tm_min = 0;
- alm_tm->tm_hour++;
- if (alm_tm->tm_hour >= 24) {
- alm_tm->tm_hour = 0;
- alm_tm->tm_mday++;
- if (alm_tm->tm_mday > 31)
- alm_tm->tm_mday = 0;
- }
- }
- }
-
ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
if (ret < 0)
return ret;
@@ -523,6 +505,10 @@ static int hym8563_probe(struct i2c_client *client,
if (!hym8563)
return -ENOMEM;
+ hym8563->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(hym8563->rtc))
+ return PTR_ERR(hym8563->rtc);
+
hym8563->client = client;
i2c_set_clientdata(client, hym8563);
@@ -557,19 +543,15 @@ static int hym8563_probe(struct i2c_client *client,
dev_dbg(&client->dev, "rtc information is %s\n",
(ret & HYM8563_SEC_VL) ? "invalid" : "valid");
- hym8563->rtc = devm_rtc_device_register(&client->dev, client->name,
- &hym8563_rtc_ops, THIS_MODULE);
- if (IS_ERR(hym8563->rtc))
- return PTR_ERR(hym8563->rtc);
-
- /* the hym8563 alarm only supports a minute accuracy */
- hym8563->rtc->uie_unsupported = 1;
+ hym8563->rtc->ops = &hym8563_rtc_ops;
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, hym8563->rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, hym8563->rtc->features);
#ifdef CONFIG_COMMON_CLK
hym8563_clkout_register_clk(hym8563);
#endif
- return 0;
+ return devm_rtc_register_device(hym8563->rtc);
}
static const struct i2c_device_id hym8563_id[] = {
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 6d383b629d20..d868458cd40e 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -932,10 +932,8 @@ static int m41t80_probe(struct i2c_client *client,
m41t80_data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
m41t80_data->rtc->range_max = RTC_TIMESTAMP_END_2099;
- if (client->irq <= 0) {
- /* We cannot support UIE mode if we do not have an IRQ line */
- m41t80_data->rtc->uie_unsupported = 1;
- }
+ if (client->irq <= 0)
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, m41t80_data->rtc->features);
/* Make sure HT (Halt Update) bit is cleared */
rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index ae9f131b43c0..522449b25921 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -176,6 +176,17 @@ int mc146818_get_time(struct rtc_time *time)
}
EXPORT_SYMBOL_GPL(mc146818_get_time);
+/* AMD systems don't allow access to AltCentury with DV1 */
+static bool apply_amd_register_a_behavior(void)
+{
+#ifdef CONFIG_X86
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ return true;
+#endif
+ return false;
+}
+
/* Set the current date and time in the real time clock. */
int mc146818_set_time(struct rtc_time *time)
{
@@ -232,8 +243,10 @@ int mc146818_set_time(struct rtc_time *time)
if (yrs >= 100)
yrs -= 100;
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
- || RTC_ALWAYS_BCD) {
+ spin_lock_irqsave(&rtc_lock, flags);
+ save_control = CMOS_READ(RTC_CONTROL);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
sec = bin2bcd(sec);
min = bin2bcd(min);
hrs = bin2bcd(hrs);
@@ -247,7 +260,10 @@ int mc146818_set_time(struct rtc_time *time)
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+ if (apply_amd_register_a_behavior())
+ CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
+ else
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index bb2ea9bc56f2..6d7656a75cae 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -210,20 +210,6 @@ static int mpc5121_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
- /*
- * the alarm has no seconds so deal with it
- */
- if (alarm->time.tm_sec) {
- alarm->time.tm_sec = 0;
- alarm->time.tm_min++;
- if (alarm->time.tm_min >= 60) {
- alarm->time.tm_min = 0;
- alarm->time.tm_hour++;
- if (alarm->time.tm_hour >= 24)
- alarm->time.tm_hour = 0;
- }
- }
-
alarm->time.tm_mday = -1;
alarm->time.tm_mon = -1;
alarm->time.tm_year = -1;
@@ -349,7 +335,8 @@ static int mpc5121_rtc_probe(struct platform_device *op)
}
rtc->rtc->ops = &mpc5200_rtc_ops;
- rtc->rtc->uie_unsupported = 1;
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtc->features);
rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
rtc->rtc->range_max = 65733206399ULL; /* 4052-12-31 23:59:59 */
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index f8f49a969c23..ad41aaf8a17f 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -250,7 +250,7 @@ static int opal_rtc_probe(struct platform_device *pdev)
rtc->ops = &opal_rtc_ops;
rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
rtc->range_max = RTC_TIMESTAMP_END_9999;
- rtc->uie_unsupported = 1;
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
return devm_rtc_register_device(rtc);
}
diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c
new file mode 100644
index 000000000000..9f8b5d4a8f6b
--- /dev/null
+++ b/drivers/rtc/rtc-optee.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Microchip.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/tee_drv.h>
+
+#define RTC_INFO_VERSION 0x1
+
+#define TA_CMD_RTC_GET_INFO 0x0
+#define TA_CMD_RTC_GET_TIME 0x1
+#define TA_CMD_RTC_SET_TIME 0x2
+#define TA_CMD_RTC_GET_OFFSET 0x3
+#define TA_CMD_RTC_SET_OFFSET 0x4
+
+#define TA_RTC_FEATURE_CORRECTION BIT(0)
+
+struct optee_rtc_time {
+ u32 tm_sec;
+ u32 tm_min;
+ u32 tm_hour;
+ u32 tm_mday;
+ u32 tm_mon;
+ u32 tm_year;
+ u32 tm_wday;
+};
+
+struct optee_rtc_info {
+ u64 version;
+ u64 features;
+ struct optee_rtc_time range_min;
+ struct optee_rtc_time range_max;
+};
+
+/**
+ * struct optee_rtc - OP-TEE RTC private data
+ * @dev: OP-TEE based RTC device.
+ * @ctx: OP-TEE context handler.
+ * @session_id: RTC TA session identifier.
+ * @shm: Memory pool shared with RTC device.
+ * @features: Bitfield of RTC features.
+ */
+struct optee_rtc {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ struct tee_shm *shm;
+ u64 features;
+};
+
+static int optee_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct optee_rtc_time *optee_tm;
+ struct tee_param param[4] = {0};
+ int ret;
+
+ inv_arg.func = TA_CMD_RTC_GET_TIME;
+ inv_arg.session = priv->session_id;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[0].u.memref.shm = priv->shm;
+ param[0].u.memref.size = sizeof(struct optee_rtc_time);
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ optee_tm = tee_shm_get_va(priv->shm, 0);
+ if (IS_ERR(optee_tm))
+ return PTR_ERR(optee_tm);
+
+ if (param[0].u.memref.size != sizeof(*optee_tm))
+ return -EPROTO;
+
+ tm->tm_sec = optee_tm->tm_sec;
+ tm->tm_min = optee_tm->tm_min;
+ tm->tm_hour = optee_tm->tm_hour;
+ tm->tm_mday = optee_tm->tm_mday;
+ tm->tm_mon = optee_tm->tm_mon;
+ tm->tm_year = optee_tm->tm_year - 1900;
+ tm->tm_wday = optee_tm->tm_wday;
+ tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+ return 0;
+}
+
+static int optee_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct tee_param param[4] = {0};
+ struct optee_rtc_time optee_tm;
+ void *rtc_data;
+ int ret;
+
+ optee_tm.tm_sec = tm->tm_sec;
+ optee_tm.tm_min = tm->tm_min;
+ optee_tm.tm_hour = tm->tm_hour;
+ optee_tm.tm_mday = tm->tm_mday;
+ optee_tm.tm_mon = tm->tm_mon;
+ optee_tm.tm_year = tm->tm_year + 1900;
+ optee_tm.tm_wday = tm->tm_wday;
+
+ inv_arg.func = TA_CMD_RTC_SET_TIME;
+ inv_arg.session = priv->session_id;
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ param[0].u.memref.shm = priv->shm;
+ param[0].u.memref.size = sizeof(struct optee_rtc_time);
+
+ rtc_data = tee_shm_get_va(priv->shm, 0);
+ if (IS_ERR(rtc_data))
+ return PTR_ERR(rtc_data);
+
+ memcpy(rtc_data, &optee_tm, sizeof(struct optee_rtc_time));
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ return 0;
+}
+
+static int optee_rtc_readoffset(struct device *dev, long *offset)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct tee_param param[4] = {0};
+ int ret;
+
+ if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+ return -EOPNOTSUPP;
+
+ inv_arg.func = TA_CMD_RTC_GET_OFFSET;
+ inv_arg.session = priv->session_id;
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ *offset = param[0].u.value.a;
+
+ return 0;
+}
+
+static int optee_rtc_setoffset(struct device *dev, long offset)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct tee_param param[4] = {0};
+ int ret;
+
+ if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+ return -EOPNOTSUPP;
+
+ inv_arg.func = TA_CMD_RTC_SET_OFFSET;
+ inv_arg.session = priv->session_id;
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = offset;
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ return 0;
+}
+
+static const struct rtc_class_ops optee_rtc_ops = {
+ .read_time = optee_rtc_readtime,
+ .set_time = optee_rtc_settime,
+ .set_offset = optee_rtc_setoffset,
+ .read_offset = optee_rtc_readoffset,
+};
+
+static int optee_rtc_read_info(struct device *dev, struct rtc_device *rtc,
+ u64 *features)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct tee_param param[4] = {0};
+ struct optee_rtc_info *info;
+ struct optee_rtc_time *tm;
+ int ret;
+
+ inv_arg.func = TA_CMD_RTC_GET_INFO;
+ inv_arg.session = priv->session_id;
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[0].u.memref.shm = priv->shm;
+ param[0].u.memref.size = sizeof(*info);
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ info = tee_shm_get_va(priv->shm, 0);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ if (param[0].u.memref.size != sizeof(*info))
+ return -EPROTO;
+
+ if (info->version != RTC_INFO_VERSION)
+ return -EPROTO;
+
+ *features = info->features;
+
+ tm = &info->range_min;
+ rtc->range_min = mktime64(tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min,
+ tm->tm_sec);
+ tm = &info->range_max;
+ rtc->range_max = mktime64(tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min,
+ tm->tm_sec);
+
+ return 0;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int optee_rtc_probe(struct device *dev)
+{
+ struct tee_client_device *rtc_device = to_tee_client_device(dev);
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct optee_rtc *priv;
+ struct rtc_device *rtc;
+ struct tee_shm *shm;
+ int ret, err;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ /* Open context with TEE driver */
+ priv->ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
+ if (IS_ERR(priv->ctx))
+ return -ENODEV;
+
+ /* Open session with rtc Trusted App */
+ export_uuid(sess_arg.uuid, &rtc_device->id.uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+
+ ret = tee_client_open_session(priv->ctx, &sess_arg, NULL);
+ if (ret < 0 || sess_arg.ret != 0) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n", sess_arg.ret);
+ err = -EINVAL;
+ goto out_ctx;
+ }
+ priv->session_id = sess_arg.session;
+
+ shm = tee_shm_alloc_kernel_buf(priv->ctx, sizeof(struct optee_rtc_info));
+ if (IS_ERR(shm)) {
+ dev_err(dev, "tee_shm_alloc_kernel_buf failed\n");
+ err = PTR_ERR(shm);
+ goto out_sess;
+ }
+
+ priv->shm = shm;
+ priv->dev = dev;
+ dev_set_drvdata(dev, priv);
+
+ rtc->ops = &optee_rtc_ops;
+
+ err = optee_rtc_read_info(dev, rtc, &priv->features);
+ if (err) {
+ dev_err(dev, "Failed to get RTC features from OP-TEE\n");
+ goto out_shm;
+ }
+
+ err = devm_rtc_register_device(rtc);
+ if (err)
+ goto out_shm;
+
+ /*
+ * We must clear this bit after registering because rtc_register_device
+ * will set it if it sees that .set_offset is provided.
+ */
+ if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+ clear_bit(RTC_FEATURE_CORRECTION, rtc->features);
+
+ return 0;
+
+out_shm:
+ tee_shm_free(priv->shm);
+out_sess:
+ tee_client_close_session(priv->ctx, priv->session_id);
+out_ctx:
+ tee_client_close_context(priv->ctx);
+
+ return err;
+}
+
+static int optee_rtc_remove(struct device *dev)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+
+ tee_client_close_session(priv->ctx, priv->session_id);
+ tee_client_close_context(priv->ctx);
+
+ return 0;
+}
+
+static const struct tee_client_device_id optee_rtc_id_table[] = {
+ {UUID_INIT(0xf389f8c8, 0x845f, 0x496c,
+ 0x8b, 0xbe, 0xd6, 0x4b, 0xd2, 0x4c, 0x92, 0xfd)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, optee_rtc_id_table);
+
+static struct tee_client_driver optee_rtc_driver = {
+ .id_table = optee_rtc_id_table,
+ .driver = {
+ .name = "optee_rtc",
+ .bus = &tee_bus_type,
+ .probe = optee_rtc_probe,
+ .remove = optee_rtc_remove,
+ },
+};
+
+static int __init optee_rtc_mod_init(void)
+{
+ return driver_register(&optee_rtc_driver.driver);
+}
+
+static void __exit optee_rtc_mod_exit(void)
+{
+ driver_unregister(&optee_rtc_driver.driver);
+}
+
+module_init(optee_rtc_mod_init);
+module_exit(optee_rtc_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
+MODULE_DESCRIPTION("OP-TEE based RTC driver");
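Once probed, the driver shows up as a regular RTC class device, so it can be exercised from userspace through the standard character-device interface. A minimal check (the rtcN index depends on probe order):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>
#include <unistd.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDONLY);	/* index may differ */

	if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0) {
		perror("rtc");
		return 1;
	}
	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}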
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 7473e6c8a183..e13b5e695d06 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -427,7 +427,8 @@ static int pcf2123_probe(struct spi_device *spi)
* support to this driver to generate interrupts more than once
* per minute.
*/
- rtc->uie_unsupported = 1;
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
rtc->ops = &pcf2123_rtc_ops;
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 81a5b1f2e68c..63b275b014bd 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -374,7 +374,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
- unsigned int buf[5], ctrl2;
+ u8 buf[5];
+ unsigned int ctrl2;
int ret;
ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
@@ -655,13 +656,25 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
- pcf2127->rtc->uie_unsupported = 1;
+ set_bit(RTC_FEATURE_ALARM_RES_2S, pcf2127->rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf2127->rtc->features);
clear_bit(RTC_FEATURE_ALARM, pcf2127->rtc->features);
if (alarm_irq > 0) {
+ unsigned long flags;
+
+ /*
+ * If flags is 0, devm_request_threaded_irq() will use the IRQ
+ * flags obtained from the device tree.
+ */
+ if (dev_fwnode(dev))
+ flags = 0;
+ else
+ flags = IRQF_TRIGGER_LOW;
+
ret = devm_request_threaded_irq(dev, alarm_irq, NULL,
pcf2127_rtc_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ flags | IRQF_ONESHOT,
dev_name(dev), dev);
if (ret) {
dev_err(dev, "failed to request alarm irq\n");
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index df2b072c394d..9760824ec199 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -616,7 +616,8 @@ static int pcf85063_probe(struct i2c_client *client)
pcf85063->rtc->ops = &pcf85063_rtc_ops;
pcf85063->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf85063->rtc->range_max = RTC_TIMESTAMP_END_2099;
- pcf85063->rtc->uie_unsupported = 1;
+ set_bit(RTC_FEATURE_ALARM_RES_2S, pcf85063->rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf85063->rtc->features);
clear_bit(RTC_FEATURE_ALARM, pcf85063->rtc->features);
if (config->has_alarms && client->irq > 0) {
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index c93acade7205..b1b1943de844 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -212,14 +212,6 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
if (err < 0)
return err;
- /* The alarm has no seconds, round up to nearest minute */
- if (tm->time.tm_sec) {
- time64_t alarm_time = rtc_tm_to_time64(&tm->time);
-
- alarm_time += 60 - tm->time.tm_sec;
- rtc_time64_to_tm(alarm_time, &tm->time);
- }
-
regs[0] = bin2bcd(tm->time.tm_min);
regs[1] = bin2bcd(tm->time.tm_hour);
regs[2] = bin2bcd(tm->time.tm_mday);
@@ -240,9 +232,9 @@ static int pcf8523_param_get(struct device *dev, struct rtc_param *param)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
int ret;
+ u32 value;
switch(param->param) {
- u32 value;
case RTC_PARAM_BACKUP_SWITCH_MODE:
ret = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value);
@@ -279,9 +271,9 @@ static int pcf8523_param_get(struct device *dev, struct rtc_param *param)
static int pcf8523_param_set(struct device *dev, struct rtc_param *param)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
+ u8 mode;
switch(param->param) {
- u8 mode;
case RTC_PARAM_BACKUP_SWITCH_MODE:
switch (param->uvalue) {
case RTC_BSM_DISABLED:
@@ -450,7 +442,8 @@ static int pcf8523_probe(struct i2c_client *client,
rtc->ops = &pcf8523_rtc_ops;
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
- rtc->uie_unsupported = 1;
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
if (client->irq > 0) {
err = regmap_write(pcf8523->regmap, PCF8523_TMR_CLKOUT_CTRL, 0x38);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index c8bddfb94129..9d06813e2e6d 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -330,19 +330,6 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
unsigned char buf[4];
int err;
- /* The alarm has no seconds, round up to nearest minute */
- if (tm->time.tm_sec) {
- time64_t alarm_time = rtc_tm_to_time64(&tm->time);
-
- alarm_time += 60 - tm->time.tm_sec;
- rtc_time64_to_tm(alarm_time, &tm->time);
- }
-
- dev_dbg(dev, "%s, min=%d hour=%d wday=%d mday=%d "
- "enabled=%d pending=%d\n", __func__,
- tm->time.tm_min, tm->time.tm_hour, tm->time.tm_wday,
- tm->time.tm_mday, tm->enabled, tm->pending);
-
buf[0] = bin2bcd(tm->time.tm_min);
buf[1] = bin2bcd(tm->time.tm_hour);
buf[2] = bin2bcd(tm->time.tm_mday);
@@ -565,7 +552,8 @@ static int pcf8563_probe(struct i2c_client *client,
pcf8563->rtc->ops = &pcf8563_rtc_ops;
/* the pcf8563 alarm only supports a minute accuracy */
- pcf8563->rtc->uie_unsupported = 1;
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, pcf8563->rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf8563->rtc->features);
pcf8563->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf8563->rtc->range_max = RTC_TIMESTAMP_END_2099;
pcf8563->rtc->set_start_time = true;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index e38ee8848385..bad6a5d9c683 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -350,9 +350,6 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
- if (!adev->irq[0])
- clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
-
device_init_wakeup(&adev->dev, true);
ldata->rtc = devm_rtc_allocate_device(&adev->dev);
if (IS_ERR(ldata->rtc)) {
@@ -360,6 +357,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
goto out;
}
+ if (!adev->irq[0])
+ clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
+
ldata->rtc->ops = ops;
ldata->rtc->range_min = vendor->range_min;
ldata->rtc->range_max = vendor->range_max;
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 29a1c65661e9..dc6d1476baa5 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -7,6 +7,7 @@
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -83,7 +84,7 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
if (!rtc_dd->allow_set_time)
- return -EACCES;
+ return -ENODEV;
secs = rtc_tm_to_time64(tm);
@@ -527,40 +528,28 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
return rc;
}
- return devm_rtc_register_device(rtc_dd->rtc);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int pm8xxx_rtc_resume(struct device *dev)
-{
- struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+ rc = devm_rtc_register_device(rtc_dd->rtc);
+ if (rc)
+ return rc;
- if (device_may_wakeup(dev))
- disable_irq_wake(rtc_dd->rtc_alarm_irq);
+ rc = dev_pm_set_wake_irq(&pdev->dev, rtc_dd->rtc_alarm_irq);
+ if (rc)
+ return rc;
return 0;
}
-static int pm8xxx_rtc_suspend(struct device *dev)
+static int pm8xxx_remove(struct platform_device *pdev)
{
- struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(rtc_dd->rtc_alarm_irq);
-
+ dev_pm_clear_wake_irq(&pdev->dev);
return 0;
}
-#endif
-
-static SIMPLE_DEV_PM_OPS(pm8xxx_rtc_pm_ops,
- pm8xxx_rtc_suspend,
- pm8xxx_rtc_resume);
static struct platform_driver pm8xxx_rtc_driver = {
.probe = pm8xxx_rtc_probe,
+ .remove = pm8xxx_remove,
.driver = {
.name = "rtc-pm8xxx",
- .pm = &pm8xxx_rtc_pm_ops,
.of_match_table = pm8xxx_id_table,
},
};
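dev_pm_set_wake_irq() hands the wake IRQ to the PM core, which then calls enable_irq_wake()/disable_irq_wake() around system suspend on its own, making the open-coded suspend/resume callbacks above redundant. The probe-time idiom, sketched (assuming wakeup capability was already declared):

	device_init_wakeup(&pdev->dev, true);

	/* PM core arms/disarms this IRQ across system suspend. */
	rc = dev_pm_set_wake_irq(&pdev->dev, alarm_irq);
	if (rc)
		return rc;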
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index b4a520056b1a..d4777b01ab22 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -204,8 +204,10 @@ static int spear_rtc_read_time(struct device *dev, struct rtc_time *tm)
/* we don't report wday/yday/isdst ... */
rtc_wait_not_busy(config);
- time = readl(config->ioaddr + TIME_REG);
- date = readl(config->ioaddr + DATE_REG);
+ do {
+ time = readl(config->ioaddr + TIME_REG);
+ date = readl(config->ioaddr + DATE_REG);
+ } while (time != readl(config->ioaddr + TIME_REG));
tm->tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
tm->tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
tm->tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
@@ -352,6 +354,10 @@ static int spear_rtc_probe(struct platform_device *pdev)
if (!config)
return -ENOMEM;
+ config->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(config->rtc))
+ return PTR_ERR(config->rtc);
+
/* alarm irqs */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -380,16 +386,13 @@ static int spear_rtc_probe(struct platform_device *pdev)
spin_lock_init(&config->lock);
platform_set_drvdata(pdev, config);
- config->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &spear_rtc_ops, THIS_MODULE);
- if (IS_ERR(config->rtc)) {
- dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
- PTR_ERR(config->rtc));
- status = PTR_ERR(config->rtc);
- goto err_disable_clock;
- }
+ config->rtc->ops = &spear_rtc_ops;
+ config->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
+ config->rtc->range_max = RTC_TIMESTAMP_END_9999;
- config->rtc->uie_unsupported = 1;
+ status = devm_rtc_register_device(config->rtc);
+ if (status)
+ goto err_disable_clock;
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
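The new read loop takes a consistent snapshot of the two registers: if TIME_REG changed between the first read and the re-read, DATE_REG may belong to the neighbouring second (e.g. around midnight), so both are read again. This is the usual retry-until-stable idiom for multi-register counters, in generic form:

	/* Re-read until the fast-changing half is stable. */
	do {
		lo = readl(base + LO_REG);
		hi = readl(base + HI_REG);
	} while (lo != readl(base + LO_REG));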
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 711832c758ae..5b3e4da63406 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk/sunxi-ng.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fs.h>
@@ -48,7 +49,8 @@
/* Alarm 0 (counter) */
#define SUN6I_ALRM_COUNTER 0x0020
-#define SUN6I_ALRM_CUR_VAL 0x0024
+/* This holds the remaining alarm seconds on older SoCs (current value) */
+#define SUN6I_ALRM_COUNTER_HMS 0x0024
#define SUN6I_ALRM_EN 0x0028
#define SUN6I_ALRM_EN_CNT_EN BIT(0)
#define SUN6I_ALRM_IRQ_EN 0x002c
@@ -110,6 +112,8 @@
#define SUN6I_YEAR_MIN 1970
#define SUN6I_YEAR_OFF (SUN6I_YEAR_MIN - 1900)
+#define SECS_PER_DAY (24 * 3600ULL)
+
/*
* There are other differences between models, including:
*
@@ -133,12 +137,15 @@ struct sun6i_rtc_clk_data {
unsigned int has_auto_swt : 1;
};
+#define RTC_LINEAR_DAY BIT(0)
+
struct sun6i_rtc_dev {
struct rtc_device *rtc;
const struct sun6i_rtc_clk_data *data;
void __iomem *base;
int irq;
- unsigned long alarm;
+ time64_t alarm;
+ unsigned long flags;
struct clk_hw hw;
struct clk_hw *int_osc;
@@ -363,23 +370,6 @@ CLK_OF_DECLARE_DRIVER(sun8i_h3_rtc_clk, "allwinner,sun8i-h3-rtc",
CLK_OF_DECLARE_DRIVER(sun50i_h5_rtc_clk, "allwinner,sun50i-h5-rtc",
sun8i_h3_rtc_clk_init);
-static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
- .rc_osc_rate = 16000000,
- .fixed_prescaler = 32,
- .has_prescaler = 1,
- .has_out_clk = 1,
- .export_iosc = 1,
- .has_losc_en = 1,
- .has_auto_swt = 1,
-};
-
-static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
-{
- sun6i_rtc_clk_init(node, &sun50i_h6_rtc_data);
-}
-CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
- sun50i_h6_rtc_clk_init);
-
/*
* The R40 user manual is self-conflicting on whether the prescaler is
* fixed or configurable. The clock diagram shows it as fixed, but there
@@ -467,22 +457,30 @@ static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
} while ((date != readl(chip->base + SUN6I_RTC_YMD)) ||
(time != readl(chip->base + SUN6I_RTC_HMS)));
+ if (chip->flags & RTC_LINEAR_DAY) {
+ /*
+ * Newer chips store a linear day number; the manual
+ * does not mandate any epoch base. The BSP driver uses
+ * the UNIX epoch, so we copy that, as it's the easiest
+ * anyway.
+ */
+ rtc_time64_to_tm((date & 0xffff) * SECS_PER_DAY, rtc_tm);
+ } else {
+ rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date);
+ rtc_tm->tm_mon = SUN6I_DATE_GET_MON_VALUE(date) - 1;
+ rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date);
+
+ /*
+ * switch from (data_year->min)-relative offset to
+ * a (1900)-relative one
+ */
+ rtc_tm->tm_year += SUN6I_YEAR_OFF;
+ }
+
rtc_tm->tm_sec = SUN6I_TIME_GET_SEC_VALUE(time);
rtc_tm->tm_min = SUN6I_TIME_GET_MIN_VALUE(time);
rtc_tm->tm_hour = SUN6I_TIME_GET_HOUR_VALUE(time);
- rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date);
- rtc_tm->tm_mon = SUN6I_DATE_GET_MON_VALUE(date);
- rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date);
-
- rtc_tm->tm_mon -= 1;
-
- /*
- * switch from (data_year->min)-relative offset to
- * a (1900)-relative one
- */
- rtc_tm->tm_year += SUN6I_YEAR_OFF;
-
return 0;
}
@@ -510,36 +508,54 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &wkalrm->time;
struct rtc_time tm_now;
- unsigned long time_now = 0;
- unsigned long time_set = 0;
- unsigned long time_gap = 0;
- int ret = 0;
-
- ret = sun6i_rtc_gettime(dev, &tm_now);
- if (ret < 0) {
- dev_err(dev, "Error in getting time\n");
- return -EINVAL;
- }
+ time64_t time_set;
+ u32 counter_val, counter_val_hms;
+ int ret;
time_set = rtc_tm_to_time64(alrm_tm);
- time_now = rtc_tm_to_time64(&tm_now);
- if (time_set <= time_now) {
- dev_err(dev, "Date to set in the past\n");
- return -EINVAL;
- }
-
- time_gap = time_set - time_now;
- if (time_gap > U32_MAX) {
- dev_err(dev, "Date too far in the future\n");
- return -EINVAL;
+ if (chip->flags & RTC_LINEAR_DAY) {
+ /*
+ * The alarm registers hold the actual alarm time, encoded
+ * in the same way (linear day + HMS) as the current time.
+ */
+ counter_val_hms = SUN6I_TIME_SET_SEC_VALUE(alrm_tm->tm_sec) |
+ SUN6I_TIME_SET_MIN_VALUE(alrm_tm->tm_min) |
+ SUN6I_TIME_SET_HOUR_VALUE(alrm_tm->tm_hour);
+ /* The division will cut off the H:M:S part of alrm_tm. */
+ counter_val = div_u64(rtc_tm_to_time64(alrm_tm), SECS_PER_DAY);
+ } else {
+ /* The alarm register holds the number of seconds left. */
+ time64_t time_now;
+
+ ret = sun6i_rtc_gettime(dev, &tm_now);
+ if (ret < 0) {
+ dev_err(dev, "Error in getting time\n");
+ return -EINVAL;
+ }
+
+ time_now = rtc_tm_to_time64(&tm_now);
+ if (time_set <= time_now) {
+ dev_err(dev, "Date to set in the past\n");
+ return -EINVAL;
+ }
+ if ((time_set - time_now) > U32_MAX) {
+ dev_err(dev, "Date too far in the future\n");
+ return -EINVAL;
+ }
+
+ counter_val = time_set - time_now;
}
sun6i_rtc_setaie(0, chip);
writel(0, chip->base + SUN6I_ALRM_COUNTER);
+ if (chip->flags & RTC_LINEAR_DAY)
+ writel(0, chip->base + SUN6I_ALRM_COUNTER_HMS);
usleep_range(100, 300);
- writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
+ writel(counter_val, chip->base + SUN6I_ALRM_COUNTER);
+ if (chip->flags & RTC_LINEAR_DAY)
+ writel(counter_val_hms, chip->base + SUN6I_ALRM_COUNTER_HMS);
chip->alarm = time_set;
sun6i_rtc_setaie(wkalrm->enabled, chip);
@@ -571,20 +587,25 @@ static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
u32 date = 0;
u32 time = 0;
- rtc_tm->tm_year -= SUN6I_YEAR_OFF;
- rtc_tm->tm_mon += 1;
-
- date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
- SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon) |
- SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
-
- if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
- date |= SUN6I_LEAP_SET_VALUE(1);
-
time = SUN6I_TIME_SET_SEC_VALUE(rtc_tm->tm_sec) |
SUN6I_TIME_SET_MIN_VALUE(rtc_tm->tm_min) |
SUN6I_TIME_SET_HOUR_VALUE(rtc_tm->tm_hour);
+ if (chip->flags & RTC_LINEAR_DAY) {
+ /* The division will cut off the H:M:S part of rtc_tm. */
+ date = div_u64(rtc_tm_to_time64(rtc_tm), SECS_PER_DAY);
+ } else {
+ rtc_tm->tm_year -= SUN6I_YEAR_OFF;
+ rtc_tm->tm_mon += 1;
+
+ date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
+ SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon) |
+ SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
+
+ if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
+ date |= SUN6I_LEAP_SET_VALUE(1);
+ }
+
/* Check whether registers are writable */
if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL,
SUN6I_LOSC_CTRL_ACC_MASK, 50)) {
@@ -668,11 +689,35 @@ static int sun6i_rtc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(sun6i_rtc_pm_ops,
sun6i_rtc_suspend, sun6i_rtc_resume);
+static void sun6i_rtc_bus_clk_cleanup(void *data)
+{
+ struct clk *bus_clk = data;
+
+ clk_disable_unprepare(bus_clk);
+}
+
static int sun6i_rtc_probe(struct platform_device *pdev)
{
struct sun6i_rtc_dev *chip = sun6i_rtc;
+ struct device *dev = &pdev->dev;
+ struct clk *bus_clk;
int ret;
+ bus_clk = devm_clk_get_optional(dev, "bus");
+ if (IS_ERR(bus_clk))
+ return PTR_ERR(bus_clk);
+
+ if (bus_clk) {
+ ret = clk_prepare_enable(bus_clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, sun6i_rtc_bus_clk_cleanup,
+ bus_clk);
+ if (ret)
+ return ret;
+ }
+
if (!chip) {
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -683,10 +728,18 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
+
+ if (IS_REACHABLE(CONFIG_SUN6I_RTC_CCU)) {
+ ret = sun6i_rtc_ccu_probe(dev, chip->base);
+ if (ret)
+ return ret;
+ }
}
platform_set_drvdata(pdev, chip);
+ chip->flags = (unsigned long)of_device_get_match_data(&pdev->dev);
+
chip->irq = platform_get_irq(pdev, 0);
if (chip->irq < 0)
return chip->irq;
@@ -733,7 +786,10 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
return PTR_ERR(chip->rtc);
chip->rtc->ops = &sun6i_rtc_ops;
- chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
+ if (chip->flags & RTC_LINEAR_DAY)
+ chip->rtc->range_max = (65536 * SECS_PER_DAY) - 1;
+ else
+ chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
ret = devm_rtc_register_device(chip->rtc);
if (ret)
@@ -758,6 +814,8 @@ static const struct of_device_id sun6i_rtc_dt_ids[] = {
{ .compatible = "allwinner,sun8i-v3-rtc" },
{ .compatible = "allwinner,sun50i-h5-rtc" },
{ .compatible = "allwinner,sun50i-h6-rtc" },
+ { .compatible = "allwinner,sun50i-h616-rtc",
+ .data = (void *)RTC_LINEAR_DAY },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
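On RTC_LINEAR_DAY parts the hardware stores whole days since an arbitrary epoch plus a separate H:M:S word, and the driver picks the Unix epoch. Round-tripping a struct rtc_time through that encoding, as a standalone sketch:

#include <linux/math64.h>
#include <linux/rtc.h>

#define SECS_PER_DAY	(24 * 3600ULL)

/* Encode: whole days since 1970-01-01; H:M:S lives in a second register. */
static u32 tm_to_linear_day(struct rtc_time *tm)
{
	return div_u64(rtc_tm_to_time64(tm), SECS_PER_DAY);
}

/* Decode: rebuild the date part; H:M:S is filled in separately. */
static void linear_day_to_tm(u32 day, struct rtc_time *tm)
{
	rtc_time64_to_tm((time64_t)day * SECS_PER_DAY, tm);
}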
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 2018614f258f..6eaa9321c074 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -432,14 +432,21 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
return ret;
}
- wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
wm8350_rtc_update_handler, 0,
"RTC Seconds", wm8350);
+ if (ret)
+ return ret;
+
wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
- wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
wm8350_rtc_alarm_handler, 0,
"RTC Alarm", wm8350);
+ if (ret) {
+ wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index cf68a9b1c9eb..d3d0054e21fd 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -180,8 +180,6 @@ static int xgene_rtc_probe(struct platform_device *pdev)
return ret;
}
- /* HW does not support update faster than 1 seconds */
- pdata->rtc->uie_unsupported = 1;
pdata->rtc->ops = &xgene_rtc_ops;
pdata->rtc->range_max = U32_MAX;
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index f0763e36b861..cb2491761958 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -745,9 +745,7 @@ sclp_sync_wait(void)
/* Loop until driver state indicates finished request */
while (sclp_running_state != sclp_running_state_idle) {
/* Check for expired request timer */
- if (timer_pending(&sclp_request_timer) &&
- get_tod_clock_fast() > timeout &&
- del_timer(&sclp_request_timer))
+ if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer))
sclp_request_timer.function(&sclp_request_timer);
cpu_relax();
}
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index de028868c6f4..fe5ee2646fcf 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -109,8 +109,7 @@ static void sclp_console_sync_queue(void)
unsigned long flags;
spin_lock_irqsave(&sclp_con_lock, flags);
- if (timer_pending(&sclp_con_timer))
- del_timer(&sclp_con_timer);
+ del_timer(&sclp_con_timer);
while (sclp_con_queue_running) {
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 7bc4e4a10937..3b4e7e5d9b71 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -231,8 +231,7 @@ sclp_vt220_emit_current(void)
list_add_tail(&sclp_vt220_current_request->list,
&sclp_vt220_outqueue);
sclp_vt220_current_request = NULL;
- if (timer_pending(&sclp_vt220_timer))
- del_timer(&sclp_vt220_timer);
+ del_timer(&sclp_vt220_timer);
}
sclp_vt220_flush_later = 0;
}
@@ -776,8 +775,7 @@ static void __sclp_vt220_flush_buffer(void)
sclp_vt220_emit_current();
spin_lock_irqsave(&sclp_vt220_lock, flags);
- if (timer_pending(&sclp_vt220_timer))
- del_timer(&sclp_vt220_timer);
+ del_timer(&sclp_vt220_timer);
while (sclp_vt220_queue_running) {
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_sync_wait();
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 7ada994d4592..38cc1565d6ae 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -354,10 +354,10 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
if ((
sense[0] == SENSE_DATA_CHECK ||
sense[0] == SENSE_EQUIPMENT_CHECK ||
- sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
+ sense[0] == (SENSE_EQUIPMENT_CHECK | SENSE_DEFERRED_UNIT_CHECK)
) && (
sense[1] == SENSE_DRIVE_ONLINE ||
- sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
+ sense[1] == (SENSE_BEGINNING_OF_TAPE | SENSE_WRITE_MODE)
)) {
switch (request->op) {
/*
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 05e136cfb8be..6d63b968309a 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -113,16 +113,10 @@ ccw_device_timeout(struct timer_list *t)
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
- if (expires == 0) {
+ if (expires == 0)
del_timer(&cdev->private->timer);
- return;
- }
- if (timer_pending(&cdev->private->timer)) {
- if (mod_timer(&cdev->private->timer, jiffies + expires))
- return;
- }
- cdev->private->timer.expires = jiffies + expires;
- add_timer(&cdev->private->timer);
+ else
+ mod_timer(&cdev->private->timer, jiffies + expires);
}
int
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 8b463681a149..ab6a7495180a 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -112,16 +112,10 @@ static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
{
struct eadm_private *private = get_eadm_private(sch);
- if (expires == 0) {
+ if (expires == 0)
del_timer(&private->timer);
- return;
- }
- if (timer_pending(&private->timer)) {
- if (mod_timer(&private->timer, jiffies + expires))
- return;
- }
- private->timer.expires = jiffies + expires;
- add_timer(&private->timer);
+ else
+ mod_timer(&private->timer, jiffies + expires);
}
static void eadm_subchannel_irq(struct subchannel *sch)
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 8fd5a17bdf99..6a65885f5f43 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -315,6 +315,7 @@ struct ap_perms {
unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)];
unsigned long apm[BITS_TO_LONGS(AP_DEVICES)];
unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)];
+ unsigned long adm[BITS_TO_LONGS(AP_DOMAINS)];
};
extern struct ap_perms ap_perms;
extern struct mutex ap_perms_mutex;
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index cf23ce1b1146..7f69ca695fc2 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -155,7 +155,7 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
/*
* The cca_xxx2protkey call may fail when a card has been
* addressed where the master key was changed after last fetch
- * of the mkvp into the cache. Try 3 times: First witout verify
+ * of the mkvp into the cache. Try 3 times: First without verify
* then with verify and last round with verify and old master
* key verification pattern match not ignored.
*/
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 7dc26365e29a..6e08d04b605d 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -1189,13 +1189,6 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
* @matrix_mdev: a mediated matrix device
* @kvm: reference to KVM instance
*
- * Note: The matrix_dev->lock must be taken prior to calling
- * this function; however, the lock will be temporarily released while the
- * guest's AP configuration is set to avoid a potential lockdep splat.
- * The kvm->lock is taken to set the guest's AP configuration which, under
- * certain circumstances, will result in a circular lock dependency if this is
- * done under the @matrix_mdev->lock.
- *
* Return: 0 if no other mediated matrix device has a reference to @kvm;
* otherwise, returns an -EPERM.
*/
@@ -1269,18 +1262,11 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
* by @matrix_mdev.
*
* @matrix_mdev: a matrix mediated device
- * @kvm: the pointer to the kvm structure being unset.
- *
- * Note: The matrix_dev->lock must be taken prior to calling
- * this function; however, the lock will be temporarily released while the
- * guest's AP configuration is cleared to avoid a potential lockdep splat.
- * The kvm->lock is taken to clear the guest's AP configuration which, under
- * certain circumstances, will result in a circular lock dependency if this is
- * done under the @matrix_mdev->lock.
*/
-static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev,
- struct kvm *kvm)
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
+ struct kvm *kvm = matrix_mdev->kvm;
+
if (kvm && kvm->arch.crypto.crycbd) {
down_write(&kvm->arch.crypto.pqap_hook_rwsem);
kvm->arch.crypto.pqap_hook = NULL;
@@ -1311,7 +1297,7 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
if (!data)
- vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
notify_rc = NOTIFY_DONE;
@@ -1448,7 +1434,7 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
&matrix_mdev->iommu_notifier);
vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
&matrix_mdev->group_notifier);
- vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
}
static int vfio_ap_mdev_get_device_info(unsigned long arg)
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 80e2a306709a..aa6dc3c0c353 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -285,10 +285,53 @@ static ssize_t aqmask_store(struct device *dev,
static DEVICE_ATTR_RW(aqmask);
+static ssize_t admask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ buf[0] = '0';
+ buf[1] = 'x';
+ for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
+ snprintf(buf + 2 + 2 * i * sizeof(long),
+ PAGE_SIZE - 2 - 2 * i * sizeof(long),
+ "%016lx", zcdndev->perms.adm[i]);
+ buf[2 + 2 * i * sizeof(long)] = '\n';
+ buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+ rc = 2 + 2 * i * sizeof(long) + 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t admask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
+ AP_DOMAINS, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(admask);
+
static struct attribute *zcdn_dev_attrs[] = {
&dev_attr_ioctlmask.attr,
&dev_attr_apmask.attr,
&dev_attr_aqmask.attr,
+ &dev_attr_admask.attr,
NULL
};
@@ -880,11 +923,22 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
if (rc)
goto out;
+ tdom = *domain;
+ if (perms != &ap_perms && tdom < AP_DOMAINS) {
+ if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
+ if (!test_bit_inv(tdom, perms->adm)) {
+ rc = -ENODEV;
+ goto out;
+ }
+ } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ }
/*
* If a valid target domain is set and this domain is NOT a usage
* domain but a control only domain, autoselect target domain.
*/
- tdom = *domain;
if (tdom < AP_DOMAINS &&
!ap_test_config_usage_domain(tdom) &&
ap_test_config_ctrl_domain(tdom))
@@ -1062,6 +1116,18 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
if (rc)
goto out_free;
+ if (perms != &ap_perms && domain < AUTOSEL_DOM) {
+ if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
+ if (!test_bit_inv(domain, perms->adm)) {
+ rc = -ENODEV;
+ goto out_free;
+ }
+ } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
+ rc = -EOPNOTSUPP;
+ goto out_free;
+ }
+ }
+
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
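A sketch of the permission gate the two hunks above add, factored into a standalone helper for clarity (the helper itself is hypothetical; test_bit_inv() and the AP_MSG_FLAG_* bits are from this patch). In the patch the check only applies to restricted callers (perms != &ap_perms) with a concrete target domain:

        static int check_domain_perm(struct ap_perms *perms, u16 dom, u32 flags)
        {
                if (flags & AP_MSG_FLAG_ADMIN)          /* control request */
                        return test_bit_inv(dom, perms->adm) ? 0 : -ENODEV;
                if (!(flags & AP_MSG_FLAG_USAGE))       /* neither kind */
                        return -EOPNOTSUPP;
                return 0;                               /* usage request */
        }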
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index 3e259befd30a..fcbd537530e8 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -90,7 +90,7 @@ static ssize_t online_store(struct device *dev,
list_for_each_entry(zq, &zc->zqueues, list)
maxzqs++;
if (maxzqs > 0)
- zq_uelist = kcalloc(maxzqs + 1, sizeof(zq), GFP_ATOMIC);
+ zq_uelist = kcalloc(maxzqs + 1, sizeof(*zq_uelist), GFP_ATOMIC);
list_for_each_entry(zq, &zc->zqueues, list)
if (zcrypt_queue_force_online(zq, online))
if (zq_uelist) {
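sizeof(*zq_uelist) ties the element size to the array actually being allocated rather than to a conveniently-named local; both happen to be pointer-sized here, so this is a robustness fix, not a behaviour change. The general idiom, illustratively:

        struct zcrypt_queue **list;

        list = kcalloc(n + 1, sizeof(*list), GFP_ATOMIC); /* element size follows the type */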
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 9ce5a71da69b..98d33f932b0b 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -1109,7 +1109,7 @@ static int ep11_wrapkey(u16 card, u16 domain,
if (kb->head.type == TOKTYPE_NON_CCA &&
kb->head.version == TOKVER_EP11_AES) {
has_header = true;
- keysize = kb->head.len < keysize ? kb->head.len : keysize;
+ keysize = min_t(size_t, kb->head.len, keysize);
}
/* request cprb and payload */
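min_t() casts both operands to one explicit type before comparing, which sidesteps the sign- and width-mixing pitfalls a hand-rolled ternary can hide when the operands differ in type, as they do here. Illustrative:

        size_t keysize = 64;
        u16 hdrlen = 32;

        keysize = min_t(size_t, hdrlen, keysize); /* both compared as size_t */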
diff --git a/fs/aio.c b/fs/aio.c
index 7b66b93d5bfa..3c249b938632 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1552,7 +1552,6 @@ static int aio_read(struct kiocb *req, const struct iocb *iocb,
file = req->ki_filp;
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
- ret = -EINVAL;
if (unlikely(!file->f_op->read_iter))
return -EINVAL;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index aa0a60ee26cb..6bfc4343c98d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8296,7 +8296,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
* cover the full folio, like invalidating the last folio, we're
* still safe to wait for ordered extent to finish.
*/
- if (!(offset == 0 && length == PAGE_SIZE)) {
+ if (!(offset == 0 && length == folio_size(folio))) {
btrfs_releasepage(&folio->page, GFP_NOFS);
return;
}
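With large folios, a folio may span several pages, so "does this invalidation cover the whole folio?" has to compare the range against folio_size(folio) rather than the single-page constant PAGE_SIZE. Illustrative:

        /* full invalidation iff the range covers the entire folio */
        bool whole = (offset == 0 && length == folio_size(folio));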
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index 04a88bfe4fcf..998e3f180d90 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -645,7 +645,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
int ret;
/*
- * Lock destination range to serialize with concurrent readpages() and
+ * Lock destination range to serialize with concurrent readahead() and
* source range to serialize with relocation.
*/
btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
@@ -739,7 +739,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
}
/*
- * Lock destination range to serialize with concurrent readpages() and
+ * Lock destination range to serialize with concurrent readahead() and
* source range to serialize with relocation.
*/
btrfs_double_extent_lock(src, off, inode, destoff, len);
diff --git a/fs/buffer.c b/fs/buffer.c
index d67fbe063a3a..2b5561ae5d0b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2352,8 +2352,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
if (err)
goto out;
- err = pagecache_write_begin(NULL, mapping, size, 0,
- AOP_FLAG_CONT_EXPAND, &page, &fsdata);
+ err = pagecache_write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
if (err)
goto out;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index feb75eb1cd82..6c9e837aa1d3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1869,7 +1869,7 @@ retry_snap:
* are pending vmtruncate. So write and vmtruncate
* can not run at the same time
*/
- written = generic_perform_write(file, from, pos);
+ written = generic_perform_write(iocb, from);
if (likely(written >= 0))
iocb->ki_pos = pos + written;
ceph_end_io_write(inode);
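generic_perform_write() now takes the kiocb itself instead of a (file, pos) pair; the iocb already carries ki_filp and ki_pos, so callers stop passing state the core can read directly. The call-site shape, illustratively:

        ssize_t written = generic_perform_write(iocb, from);

        if (written >= 0)
                iocb->ki_pos = pos + written;   /* caller still advances the offset */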
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index ea00e1a91250..9d334816eac0 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -94,7 +94,7 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
le32_to_cpu(tcon->fsAttrInfo.Attributes),
le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
- tcon->tidStatus);
+ tcon->status);
if (dev_type == FILE_DEVICE_DISK)
seq_puts(m, " type: DISK ");
else if (dev_type == FILE_DEVICE_CD_ROM)
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d1211ad4e85b..a47fa44b6d52 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -699,14 +699,14 @@ static void cifs_umount_begin(struct super_block *sb)
tcon = cifs_sb_master_tcon(cifs_sb);
spin_lock(&cifs_tcp_ses_lock);
- if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
+ if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
/* we have other mounts to same share or we have
already tried to force umount this and woken up
all waiting network requests, nothing to do */
spin_unlock(&cifs_tcp_ses_lock);
return;
} else if (tcon->tc_count == 1)
- tcon->tidStatus = CifsExiting;
+ tcon->status = TID_EXITING;
spin_unlock(&cifs_tcp_ses_lock);
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 0a4085ced40f..8de977c359b1 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -116,10 +116,18 @@ enum statusEnum {
CifsInNegotiate,
CifsNeedSessSetup,
CifsInSessSetup,
- CifsNeedTcon,
- CifsInTcon,
- CifsNeedFilesInvalidate,
- CifsInFilesInvalidate
+};
+
+/* associated with each tree connection to the server */
+enum tid_status_enum {
+ TID_NEW = 0,
+ TID_GOOD,
+ TID_EXITING,
+ TID_NEED_RECON,
+ TID_NEED_TCON,
+ TID_IN_TCON,
+ TID_NEED_FILES_INVALIDATE, /* currently unused */
+ TID_IN_FILES_INVALIDATE
};
enum securityEnum {
@@ -853,13 +861,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
#define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
-/*
- * The default wsize is 1M. find_get_pages seems to return a maximum of 256
- * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
- * a single wsize request with a single call.
- */
#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
-#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
/*
* Windows only supports a max of 60kb reads and 65535 byte writes. Default to
@@ -1039,7 +1041,7 @@ struct cifs_tcon {
char *password; /* for share-level security */
__u32 tid; /* The 4 byte tree id */
__u16 Flags; /* optional support bits */
- enum statusEnum tidStatus;
+ enum tid_status_enum status;
atomic_t num_smbs_sent;
union {
struct {
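Splitting the tree-connection states out of statusEnum into a dedicated tid_status_enum means a tcon's lifecycle can no longer be mixed up with session states at the type level. The lifecycle the rest of this series implements, sketched with a stand-in connect helper:

        tcon->status = TID_IN_TCON;     /* claimed under cifs_tcp_ses_lock */
        rc = do_tree_connect(tcon);     /* stand-in for ops->tree_connect() */
        tcon->status = rc ? TID_NEED_TCON : TID_GOOD;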
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 68b9a436af4b..aeba371c4c70 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -123,18 +123,6 @@
*/
#define CIFS_SESS_KEY_SIZE (16)
-/*
- * Size of the smb3 signing key
- */
-#define SMB3_SIGN_KEY_SIZE (16)
-
-/*
- * Size of the smb3 encryption/decryption key storage.
- * This size is big enough to store any cipher key types.
- */
-#define SMB3_ENC_DEC_KEY_SIZE (32)
-
-#define CIFS_CLIENT_CHALLENGE_SIZE (8)
#define CIFS_SERVER_CHALLENGE_SIZE (8)
#define CIFS_HMAC_MD5_HASH_SIZE (16)
#define CIFS_CPHTXT_SIZE (16)
@@ -1658,7 +1646,7 @@ struct smb_t2_rsp {
#define SMB_FIND_FILE_ID_FULL_DIR_INFO 0x105
#define SMB_FIND_FILE_ID_BOTH_DIR_INFO 0x106
#define SMB_FIND_FILE_UNIX 0x202
-#define SMB_FIND_FILE_POSIX_INFO 0x064
+/* #define SMB_FIND_FILE_POSIX_INFO 0x064 */
typedef struct smb_com_transaction2_qpi_req {
struct smb_hdr hdr; /* wct = 14+ */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 071e2f21a7db..47e927c4ff8d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -75,12 +75,11 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
/* only send once per connect */
spin_lock(&cifs_tcp_ses_lock);
- if (tcon->ses->status != CifsGood ||
- tcon->tidStatus != CifsNeedReconnect) {
+ if ((tcon->ses->status != CifsGood) || (tcon->status != TID_NEED_RECON)) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
- tcon->tidStatus = CifsInFilesInvalidate;
+ tcon->status = TID_IN_FILES_INVALIDATE;
spin_unlock(&cifs_tcp_ses_lock);
/* list all files open on tree connection and mark them invalid */
@@ -100,8 +99,8 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
mutex_unlock(&tcon->crfid.fid_mutex);
spin_lock(&cifs_tcp_ses_lock);
- if (tcon->tidStatus == CifsInFilesInvalidate)
- tcon->tidStatus = CifsNeedTcon;
+ if (tcon->status == TID_IN_FILES_INVALIDATE)
+ tcon->status = TID_NEED_TCON;
spin_unlock(&cifs_tcp_ses_lock);
/*
@@ -136,7 +135,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* have tcon) are allowed as we start force umount
*/
spin_lock(&cifs_tcp_ses_lock);
- if (tcon->tidStatus == CifsExiting) {
+ if (tcon->status == TID_EXITING) {
if (smb_command != SMB_COM_WRITE_ANDX &&
smb_command != SMB_COM_OPEN_ANDX &&
smb_command != SMB_COM_TREE_DISCONNECT) {
@@ -597,7 +596,7 @@ CIFSSMBNegotiate(const unsigned int xid,
set_credits(server, server->maxReq);
/* probably no need to store and check maxvcs */
server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
- /* set up max_read for readpages check */
+ /* set up max_read for readahead check */
server->max_read = server->maxBuf;
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9964c3634322..ee3b7c15e884 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -245,7 +245,7 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
tcon->need_reconnect = true;
- tcon->tidStatus = CifsNeedReconnect;
+ tcon->status = TID_NEED_RECON;
}
if (ses->tcon_ipc)
ses->tcon_ipc->need_reconnect = true;
@@ -2207,7 +2207,7 @@ get_ses_fail:
static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
- if (tcon->tidStatus == CifsExiting)
+ if (tcon->status == TID_EXITING)
return 0;
if (strncmp(tcon->treeName, ctx->UNC, MAX_TREE_SIZE))
return 0;
@@ -3513,6 +3513,9 @@ static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
char *oldmnt = cifs_sb->ctx->mount_options;
+ cifs_dbg(FYI, "%s: full_path=%s ref_path=%s target=%s\n", __func__, full_path, ref_path,
+ dfs_cache_get_tgt_name(tit));
+
rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
if (rc)
goto out;
@@ -3611,13 +3614,18 @@ static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
if (rc)
goto out;
- /* Try all dfs link targets */
+ /* Try all dfs link targets. If an I/O fails from currently connected DFS target with an
+ * error other than STATUS_PATH_NOT_COVERED (-EREMOTE), then retry it from other targets as
+ * specified in MS-DFSC "3.1.5.2 I/O Operation to Target Fails with an Error Other Than
+ * STATUS_PATH_NOT_COVERED."
+ */
for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
if (!rc) {
rc = is_path_remote(mnt_ctx);
- break;
+ if (!rc || rc == -EREMOTE)
+ break;
}
}
@@ -3691,7 +3699,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
goto error;
rc = is_path_remote(&mnt_ctx);
- if (rc == -EREMOTE)
+ if (rc)
rc = follow_dfs_link(&mnt_ctx);
if (rc)
goto error;
@@ -4478,12 +4486,12 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
/* only send once per connect */
spin_lock(&cifs_tcp_ses_lock);
if (tcon->ses->status != CifsGood ||
- (tcon->tidStatus != CifsNew &&
- tcon->tidStatus != CifsNeedTcon)) {
+ (tcon->status != TID_NEW &&
+ tcon->status != TID_NEED_TCON)) {
spin_unlock(&cifs_tcp_ses_lock);
return 0;
}
- tcon->tidStatus = CifsInTcon;
+ tcon->status = TID_IN_TCON;
spin_unlock(&cifs_tcp_ses_lock);
tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
@@ -4524,13 +4532,13 @@ out:
if (rc) {
spin_lock(&cifs_tcp_ses_lock);
- if (tcon->tidStatus == CifsInTcon)
- tcon->tidStatus = CifsNeedTcon;
+ if (tcon->status == TID_IN_TCON)
+ tcon->status = TID_NEED_TCON;
spin_unlock(&cifs_tcp_ses_lock);
} else {
spin_lock(&cifs_tcp_ses_lock);
- if (tcon->tidStatus == CifsInTcon)
- tcon->tidStatus = CifsGood;
+ if (tcon->status == TID_IN_TCON)
+ tcon->status = TID_GOOD;
spin_unlock(&cifs_tcp_ses_lock);
tcon->need_reconnect = false;
}
@@ -4546,24 +4554,24 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
/* only send once per connect */
spin_lock(&cifs_tcp_ses_lock);
if (tcon->ses->status != CifsGood ||
- (tcon->tidStatus != CifsNew &&
- tcon->tidStatus != CifsNeedTcon)) {
+ (tcon->status != TID_NEW &&
+ tcon->status != TID_NEED_TCON)) {
spin_unlock(&cifs_tcp_ses_lock);
return 0;
}
- tcon->tidStatus = CifsInTcon;
+ tcon->status = TID_IN_TCON;
spin_unlock(&cifs_tcp_ses_lock);
rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
if (rc) {
spin_lock(&cifs_tcp_ses_lock);
- if (tcon->tidStatus == CifsInTcon)
- tcon->tidStatus = CifsNeedTcon;
+ if (tcon->status == TID_IN_TCON)
+ tcon->status = TID_NEED_TCON;
spin_unlock(&cifs_tcp_ses_lock);
} else {
spin_lock(&cifs_tcp_ses_lock);
- if (tcon->tidStatus == CifsInTcon)
- tcon->tidStatus = CifsGood;
+ if (tcon->status == TID_IN_TCON)
+ tcon->status = TID_GOOD;
spin_unlock(&cifs_tcp_ses_lock);
tcon->need_reconnect = false;
}
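The DFS loop above keeps walking referral targets unless the failure is STATUS_PATH_NOT_COVERED (-EREMOTE), per MS-DFSC 3.1.5.2. The control flow, with stand-in helper names:

        for (rc = -ENOENT, tit = first_target(&tl); tit; tit = next_target(&tl, tit)) {
                rc = connect_target(mnt_ctx, tit);
                if (!rc) {
                        rc = is_path_remote(mnt_ctx);
                        if (!rc || rc == -EREMOTE)
                                break;  /* mounted, or hit a genuine DFS link */
                }
        }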
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 60f43bff7ccb..d511a78383c3 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -4210,13 +4210,19 @@ cifs_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
+ /* Wait for the page to be written to the cache before we allow it to
+ * be modified. We then assume the entire page will need writing back.
+ */
#ifdef CONFIG_CIFS_FSCACHE
if (PageFsCache(page) &&
wait_on_page_fscache_killable(page) < 0)
return VM_FAULT_RETRY;
#endif
- lock_page(page);
+ wait_on_page_writeback(page);
+
+ if (lock_page_killable(page) < 0)
+ return VM_FAULT_RETRY;
return VM_FAULT_LOCKED;
}
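The fault handler now waits for both fscache I/O and writeback before taking the page lock, and takes the lock killably so a fatal signal aborts the fault instead of blocking forever. The ordering, illustratively:

        if (wait_on_page_fscache_killable(page) < 0)
                return VM_FAULT_RETRY;  /* page not yet safely in the cache */
        wait_on_page_writeback(page);   /* no writeback still reading it */
        if (lock_page_killable(page) < 0)
                return VM_FAULT_RETRY;  /* interrupted; fault will retry */
        return VM_FAULT_LOCKED;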
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 60d853c92f6a..2f9e7d2f81b6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -49,7 +49,7 @@ static void cifs_set_ops(struct inode *inode)
inode->i_fop = &cifs_file_ops;
}
- /* check if server can support readpages */
+ /* check if server can support readahead */
if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read <
PAGE_SIZE + MAX_CIFS_HDR_SIZE)
inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 56598f7dbe00..afaf59c22193 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -116,7 +116,7 @@ tconInfoAlloc(void)
}
atomic_inc(&tconInfoAllocCount);
- ret_buf->tidStatus = CifsNew;
+ ret_buf->status = TID_NEW;
++ret_buf->tc_count;
INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list);
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index 4125fd113cfb..82e916ad167c 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -41,15 +41,4 @@
#define END_OF_CHAIN 4
#define RELATED_REQUEST 8
-#define SMB2_SIGNATURE_SIZE (16)
-#define SMB2_NTLMV2_SESSKEY_SIZE (16)
-#define SMB2_HMACSHA256_SIZE (32)
-#define SMB2_CMACAES_SIZE (16)
-#define SMB3_SIGNKEY_SIZE (16)
-#define SMB3_GCM128_CRYPTKEY_SIZE (16)
-#define SMB3_GCM256_CRYPTKEY_SIZE (32)
-
-/* Maximum buffer size value we can send with 1 credit */
-#define SMB2_MAX_BUFFER_SIZE 65536
-
#endif /* _SMB2_GLOB_H */
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index b25623e3fe3d..c653beb735b8 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -203,7 +203,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
- pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) {
+ pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
/* error packets have 9 byte structure size */
cifs_dbg(VFS, "Invalid response size %u for command %d\n",
le16_to_cpu(pdu->StructureSize2), command);
@@ -303,7 +303,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
/* error responses do not have data area */
if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
(((struct smb2_err_rsp *)shdr)->StructureSize) ==
- SMB2_ERROR_STRUCTURE_SIZE2)
+ SMB2_ERROR_STRUCTURE_SIZE2_LE)
return NULL;
/*
@@ -478,11 +478,11 @@ smb2_get_lease_state(struct cifsInodeInfo *cinode)
__le32 lease = 0;
if (CIFS_CACHE_WRITE(cinode))
- lease |= SMB2_LEASE_WRITE_CACHING;
+ lease |= SMB2_LEASE_WRITE_CACHING_LE;
if (CIFS_CACHE_HANDLE(cinode))
- lease |= SMB2_LEASE_HANDLE_CACHING;
+ lease |= SMB2_LEASE_HANDLE_CACHING_LE;
if (CIFS_CACHE_READ(cinode))
- lease |= SMB2_LEASE_READ_CACHING;
+ lease |= SMB2_LEASE_READ_CACHING_LE;
return lease;
}
@@ -832,8 +832,8 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve
rc = __smb2_handle_cancelled_cmd(tcon,
le16_to_cpu(hdr->Command),
le64_to_cpu(hdr->MessageId),
- le64_to_cpu(rsp->PersistentFileId),
- le64_to_cpu(rsp->VolatileFileId));
+ rsp->PersistentFileId,
+ rsp->VolatileFileId);
if (rc)
cifs_put_tcon(tcon);
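The _LE suffix marks constants that are already stored little-endian (the removed definitions used cpu_to_le32() the same way), so they can be tested and OR'ed straight into __le32 wire fields with no runtime byte swap. Illustrative:

        __le32 lease = 0;

        if (cache_write)        /* cache_write is a stand-in flag */
                lease |= SMB2_LEASE_WRITE_CACHING_LE; /* no cpu_to_le32() needed */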
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 891b11576e55..db23f5b404ba 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -897,8 +897,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
atomic_inc(&tcon->num_remote_opens);
o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
- oparms.fid->persistent_fid = le64_to_cpu(o_rsp->PersistentFileId);
- oparms.fid->volatile_fid = le64_to_cpu(o_rsp->VolatileFileId);
+ oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+ oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */
@@ -1192,17 +1192,12 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb)
{
int rc;
- __le16 *utf16_path;
struct kvec rsp_iov = {NULL, 0};
int buftype = CIFS_NO_BUFFER;
struct smb2_query_info_rsp *rsp;
struct smb2_file_full_ea_info *info = NULL;
- utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
- if (!utf16_path)
- return -ENOMEM;
-
- rc = smb2_query_info_compound(xid, tcon, utf16_path,
+ rc = smb2_query_info_compound(xid, tcon, path,
FILE_READ_EA,
FILE_FULL_EA_INFORMATION,
SMB2_O_INFO_FILE,
@@ -1235,7 +1230,6 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
le32_to_cpu(rsp->OutputBufferLength), ea_name);
qeas_exit:
- kfree(utf16_path);
free_rsp_buf(buftype, rsp_iov.iov_base);
return rc;
}
@@ -1295,7 +1289,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
* the new EA. If not we should not add it since we
* would not be able to even read the EAs back.
*/
- rc = smb2_query_info_compound(xid, tcon, utf16_path,
+ rc = smb2_query_info_compound(xid, tcon, path,
FILE_READ_EA,
FILE_FULL_EA_INFORMATION,
SMB2_O_INFO_FILE,
@@ -1643,6 +1637,7 @@ smb2_ioctl_query_info(const unsigned int xid,
unsigned int size[2];
void *data[2];
int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
+ void (*free_req1_func)(struct smb_rqst *r);
vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
if (vars == NULL)
@@ -1652,27 +1647,29 @@ smb2_ioctl_query_info(const unsigned int xid,
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
- if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
- goto e_fault;
-
+ if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
+ rc = -EFAULT;
+ goto free_vars;
+ }
if (qi.output_buffer_length > 1024) {
- kfree(vars);
- return -EINVAL;
+ rc = -EINVAL;
+ goto free_vars;
}
if (!ses || !server) {
- kfree(vars);
- return -EIO;
+ rc = -EIO;
+ goto free_vars;
}
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- buffer = memdup_user(arg + sizeof(struct smb_query_info),
- qi.output_buffer_length);
- if (IS_ERR(buffer)) {
- kfree(vars);
- return PTR_ERR(buffer);
+ if (qi.output_buffer_length) {
+ buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
+ if (IS_ERR(buffer)) {
+ rc = PTR_ERR(buffer);
+ goto free_vars;
+ }
}
/* Open */
@@ -1710,45 +1707,45 @@ smb2_ioctl_query_info(const unsigned int xid,
rc = SMB2_open_init(tcon, server,
&rqst[0], &oplock, &oparms, path);
if (rc)
- goto iqinf_exit;
+ goto free_output_buffer;
smb2_set_next_command(tcon, &rqst[0]);
/* Query */
if (qi.flags & PASSTHRU_FSCTL) {
/* Can eventually relax perm check since server enforces too */
- if (!capable(CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN)) {
rc = -EPERM;
- else {
- rqst[1].rq_iov = &vars->io_iov[0];
- rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
-
- rc = SMB2_ioctl_init(tcon, server,
- &rqst[1],
- COMPOUND_FID, COMPOUND_FID,
- qi.info_type, true, buffer,
- qi.output_buffer_length,
- CIFSMaxBufSize -
- MAX_SMB2_CREATE_RESPONSE_SIZE -
- MAX_SMB2_CLOSE_RESPONSE_SIZE);
+ goto free_open_req;
}
+ rqst[1].rq_iov = &vars->io_iov[0];
+ rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
+
+ rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+ qi.info_type, true, buffer, qi.output_buffer_length,
+ CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
+ MAX_SMB2_CLOSE_RESPONSE_SIZE);
+ free_req1_func = SMB2_ioctl_free;
} else if (qi.flags == PASSTHRU_SET_INFO) {
/* Can eventually relax perm check since server enforces too */
- if (!capable(CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN)) {
rc = -EPERM;
- else {
- rqst[1].rq_iov = &vars->si_iov[0];
- rqst[1].rq_nvec = 1;
-
- size[0] = 8;
- data[0] = buffer;
-
- rc = SMB2_set_info_init(tcon, server,
- &rqst[1],
- COMPOUND_FID, COMPOUND_FID,
- current->tgid,
- FILE_END_OF_FILE_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
+ goto free_open_req;
+ }
+ if (qi.output_buffer_length < 8) {
+ rc = -EINVAL;
+ goto free_open_req;
}
+ rqst[1].rq_iov = &vars->si_iov[0];
+ rqst[1].rq_nvec = 1;
+
+ /* MS-FSCC 2.4.13 FileEndOfFileInformation */
+ size[0] = 8;
+ data[0] = buffer;
+
+ rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+ current->tgid, FILE_END_OF_FILE_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ free_req1_func = SMB2_set_info_free;
} else if (qi.flags == PASSTHRU_QUERY_INFO) {
rqst[1].rq_iov = &vars->qi_iov[0];
rqst[1].rq_nvec = 1;
@@ -1759,6 +1756,7 @@ smb2_ioctl_query_info(const unsigned int xid,
qi.info_type, qi.additional_information,
qi.input_buffer_length,
qi.output_buffer_length, buffer);
+ free_req1_func = SMB2_query_info_free;
} else { /* unknown flags */
cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
qi.flags);
@@ -1766,7 +1764,7 @@ smb2_ioctl_query_info(const unsigned int xid,
}
if (rc)
- goto iqinf_exit;
+ goto free_open_req;
smb2_set_next_command(tcon, &rqst[1]);
smb2_set_related(&rqst[1]);
@@ -1777,14 +1775,14 @@ smb2_ioctl_query_info(const unsigned int xid,
rc = SMB2_close_init(tcon, server,
&rqst[2], COMPOUND_FID, COMPOUND_FID, false);
if (rc)
- goto iqinf_exit;
+ goto free_req_1;
smb2_set_related(&rqst[2]);
rc = compound_send_recv(xid, ses, server,
flags, 3, rqst,
resp_buftype, rsp_iov);
if (rc)
- goto iqinf_exit;
+ goto out;
/* No need to bump num_remote_opens since handle immediately closed */
if (qi.flags & PASSTHRU_FSCTL) {
@@ -1794,18 +1792,22 @@ smb2_ioctl_query_info(const unsigned int xid,
qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
if (qi.input_buffer_length > 0 &&
le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
- > rsp_iov[1].iov_len)
- goto e_fault;
+ > rsp_iov[1].iov_len) {
+ rc = -EFAULT;
+ goto out;
+ }
if (copy_to_user(&pqi->input_buffer_length,
&qi.input_buffer_length,
- sizeof(qi.input_buffer_length)))
- goto e_fault;
+ sizeof(qi.input_buffer_length))) {
+ rc = -EFAULT;
+ goto out;
+ }
if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
(const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
qi.input_buffer_length))
- goto e_fault;
+ rc = -EFAULT;
} else {
pqi = (struct smb_query_info __user *)arg;
qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
@@ -1813,28 +1815,30 @@ smb2_ioctl_query_info(const unsigned int xid,
qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
if (copy_to_user(&pqi->input_buffer_length,
&qi.input_buffer_length,
- sizeof(qi.input_buffer_length)))
- goto e_fault;
+ sizeof(qi.input_buffer_length))) {
+ rc = -EFAULT;
+ goto out;
+ }
if (copy_to_user(pqi + 1, qi_rsp->Buffer,
qi.input_buffer_length))
- goto e_fault;
+ rc = -EFAULT;
}
- iqinf_exit:
- cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
- cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
- cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
+out:
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
- kfree(vars);
+ SMB2_close_free(&rqst[2]);
+free_req_1:
+ free_req1_func(&rqst[1]);
+free_open_req:
+ SMB2_open_free(&rqst[0]);
+free_output_buffer:
kfree(buffer);
+free_vars:
+ kfree(vars);
return rc;
-
-e_fault:
- rc = -EFAULT;
- goto iqinf_exit;
}
static ssize_t
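The rewrite above replaces one catch-all exit label with a layered goto ladder: each resource gets its own label, and a failure jumps to the label that releases exactly what has been acquired so far. The pattern in miniature (illustrative, not the driver code):

        char *a, *b;
        int rc = 0;

        a = kmalloc(8, GFP_KERNEL);
        if (!a)
                return -ENOMEM;
        b = kmalloc(8, GFP_KERNEL);
        if (!b) {
                rc = -ENOMEM;
                goto free_a;
        }
        /* ... use a and b ... */
        kfree(b);
free_a:
        kfree(a);
        return rc;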
@@ -2407,8 +2411,8 @@ again:
cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
goto qdf_free;
}
- fid->persistent_fid = le64_to_cpu(op_rsp->PersistentFileId);
- fid->volatile_fid = le64_to_cpu(op_rsp->VolatileFileId);
+ fid->persistent_fid = op_rsp->PersistentFileId;
+ fid->volatile_fid = op_rsp->VolatileFileId;
/* Anything else than ENODATA means a genuine error */
if (rc && rc != -ENODATA) {
@@ -2646,7 +2650,7 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
*/
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
- __le16 *utf16_path, u32 desired_access,
+ const char *path, u32 desired_access,
u32 class, u32 type, u32 output_len,
struct kvec *rsp, int *buftype,
struct cifs_sb_info *cifs_sb)
@@ -2664,6 +2668,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct cifs_fid fid;
int rc;
+ __le16 *utf16_path;
+ struct cached_fid *cfid = NULL;
+
+ if (!path)
+ path = "";
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ if (!utf16_path)
+ return -ENOMEM;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -2672,6 +2684,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
memset(rsp_iov, 0, sizeof(rsp_iov));
+ rc = open_cached_dir(xid, tcon, path, cifs_sb, &cfid);
+
memset(&open_iov, 0, sizeof(open_iov));
rqst[0].rq_iov = open_iov;
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
@@ -2693,15 +2707,29 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
rqst[1].rq_iov = qi_iov;
rqst[1].rq_nvec = 1;
- rc = SMB2_query_info_init(tcon, server,
- &rqst[1], COMPOUND_FID, COMPOUND_FID,
- class, type, 0,
- output_len, 0,
- NULL);
+ if (cfid) {
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[1],
+ cfid->fid->persistent_fid,
+ cfid->fid->volatile_fid,
+ class, type, 0,
+ output_len, 0,
+ NULL);
+ } else {
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[1],
+ COMPOUND_FID,
+ COMPOUND_FID,
+ class, type, 0,
+ output_len, 0,
+ NULL);
+ }
if (rc)
goto qic_exit;
- smb2_set_next_command(tcon, &rqst[1]);
- smb2_set_related(&rqst[1]);
+ if (!cfid) {
+ smb2_set_next_command(tcon, &rqst[1]);
+ smb2_set_related(&rqst[1]);
+ }
memset(&close_iov, 0, sizeof(close_iov));
rqst[2].rq_iov = close_iov;
@@ -2713,9 +2741,15 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
goto qic_exit;
smb2_set_related(&rqst[2]);
- rc = compound_send_recv(xid, ses, server,
- flags, 3, rqst,
- resp_buftype, rsp_iov);
+ if (cfid) {
+ rc = compound_send_recv(xid, ses, server,
+ flags, 1, &rqst[1],
+ &resp_buftype[1], &rsp_iov[1]);
+ } else {
+ rc = compound_send_recv(xid, ses, server,
+ flags, 3, rqst,
+ resp_buftype, rsp_iov);
+ }
if (rc) {
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
if (rc == -EREMCHG) {
@@ -2729,11 +2763,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
*buftype = resp_buftype[1];
qic_exit:
+ kfree(utf16_path);
SMB2_open_free(&rqst[0]);
SMB2_query_info_free(&rqst[1]);
SMB2_close_free(&rqst[2]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+ if (cfid)
+ close_cached_dir(cfid);
return rc;
}
@@ -2743,13 +2780,12 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
{
struct smb2_query_info_rsp *rsp;
struct smb2_fs_full_size_info *info = NULL;
- __le16 utf16_path = 0; /* Null - open root of share */
struct kvec rsp_iov = {NULL, 0};
int buftype = CIFS_NO_BUFFER;
int rc;
- rc = smb2_query_info_compound(xid, tcon, &utf16_path,
+ rc = smb2_query_info_compound(xid, tcon, "",
FILE_READ_ATTRIBUTES,
FS_FULL_SIZE_INFORMATION,
SMB2_O_INFO_FILESYSTEM,
@@ -4293,12 +4329,12 @@ static __le32
map_oplock_to_lease(u8 oplock)
{
if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
- return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
+ return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
else if (oplock == SMB2_OPLOCK_LEVEL_II)
- return SMB2_LEASE_READ_CACHING;
+ return SMB2_LEASE_READ_CACHING_LE;
else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
- return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
- SMB2_LEASE_WRITE_CACHING;
+ return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
+ SMB2_LEASE_WRITE_CACHING_LE;
return 0;
}
@@ -4360,7 +4396,7 @@ smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
struct create_lease *lc = (struct create_lease *)buf;
*epoch = 0; /* not used */
- if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
+ if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
return SMB2_OPLOCK_LEVEL_NOCHANGE;
return le32_to_cpu(lc->lcontext.LeaseState);
}
@@ -4371,7 +4407,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
*epoch = le16_to_cpu(lc->lcontext.Epoch);
- if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
+ if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
return SMB2_OPLOCK_LEVEL_NOCHANGE;
if (lease_key)
memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
@@ -5814,8 +5850,8 @@ struct smb_version_values smb20_values = {
.protocol_id = SMB20_PROT_ID,
.req_capabilities = 0, /* MBZ */
.large_lock_type = 0,
- .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
- .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+ .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+ .shared_lock_type = SMB2_LOCKFLAG_SHARED,
.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
.header_size = sizeof(struct smb2_hdr),
.header_preamble_size = 0,
@@ -5835,8 +5871,8 @@ struct smb_version_values smb21_values = {
.protocol_id = SMB21_PROT_ID,
.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
.large_lock_type = 0,
- .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
- .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+ .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+ .shared_lock_type = SMB2_LOCKFLAG_SHARED,
.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
.header_size = sizeof(struct smb2_hdr),
.header_preamble_size = 0,
@@ -5856,8 +5892,8 @@ struct smb_version_values smb3any_values = {
.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
- .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
- .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+ .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+ .shared_lock_type = SMB2_LOCKFLAG_SHARED,
.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
.header_size = sizeof(struct smb2_hdr),
.header_preamble_size = 0,
@@ -5877,8 +5913,8 @@ struct smb_version_values smbdefault_values = {
.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
- .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
- .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+ .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+ .shared_lock_type = SMB2_LOCKFLAG_SHARED,
.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
.header_size = sizeof(struct smb2_hdr),
.header_preamble_size = 0,
@@ -5898,8 +5934,8 @@ struct smb_version_values smb30_values = {
.protocol_id = SMB30_PROT_ID,
.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
- .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
- .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+ .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+ .shared_lock_type = SMB2_LOCKFLAG_SHARED,
.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
.header_size = sizeof(struct smb2_hdr),
.header_preamble_size = 0,
@@ -5919,8 +5955,8 @@ struct smb_version_values smb302_values = {
.protocol_id = SMB302_PROT_ID,
.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
- .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
- .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+ .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+ .shared_lock_type = SMB2_LOCKFLAG_SHARED,
.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
.header_size = sizeof(struct smb2_hdr),
.header_preamble_size = 0,
@@ -5940,8 +5976,8 @@ struct smb_version_values smb311_values = {
.protocol_id = SMB311_PROT_ID,
.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
- .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
- .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+ .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+ .shared_lock_type = SMB2_LOCKFLAG_SHARED,
.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
.header_size = sizeof(struct smb2_hdr),
.header_preamble_size = 0,
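smb2_query_info_compound() now takes the plain path and converts it to UTF-16 internally, and when open_cached_dir() hands back a cached directory handle it issues a single QUERY_INFO against that fid instead of the usual open/query/close compound, saving two on-the-wire operations. The decision, with a stand-in send helper:

        if (cfid)       /* reuse the cached handle: one request */
                rc = send_requests(&rqst[1], 1);
        else            /* open + query + close as one compound */
                rc = send_requests(rqst, 3);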
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7e7909b1ae11..1b7ad0c09566 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -163,7 +163,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
return 0;
spin_lock(&cifs_tcp_ses_lock);
- if (tcon->tidStatus == CifsExiting) {
+ if (tcon->status == TID_EXITING) {
/*
* only tree disconnect, open, and write,
* (and ulogoff which does not have tcon)
@@ -2734,13 +2734,10 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
goto err_free_req;
}
- trace_smb3_posix_mkdir_done(xid, le64_to_cpu(rsp->PersistentFileId),
- tcon->tid,
- ses->Suid, CREATE_NOT_FILE,
- FILE_WRITE_ATTRIBUTES);
+ trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
+ CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
- SMB2_close(xid, tcon, le64_to_cpu(rsp->PersistentFileId),
- le64_to_cpu(rsp->VolatileFileId));
+ SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
/* Eventually save off posix specific response info and timestaps */
@@ -3009,14 +3006,12 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
} else if (rsp == NULL) /* unlikely to happen, but safer to check */
goto creat_exit;
else
- trace_smb3_open_done(xid, le64_to_cpu(rsp->PersistentFileId),
- tcon->tid,
- ses->Suid, oparms->create_options,
- oparms->desired_access);
+ trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
+ oparms->create_options, oparms->desired_access);
atomic_inc(&tcon->num_remote_opens);
- oparms->fid->persistent_fid = le64_to_cpu(rsp->PersistentFileId);
- oparms->fid->volatile_fid = le64_to_cpu(rsp->VolatileFileId);
+ oparms->fid->persistent_fid = rsp->PersistentFileId;
+ oparms->fid->volatile_fid = rsp->VolatileFileId;
oparms->fid->access = oparms->desired_access;
#ifdef CONFIG_CIFS_DEBUG2
oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
@@ -3313,8 +3308,8 @@ SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
if (rc)
return rc;
- req->PersistentFileId = cpu_to_le64(persistent_fid);
- req->VolatileFileId = cpu_to_le64(volatile_fid);
+ req->PersistentFileId = persistent_fid;
+ req->VolatileFileId = volatile_fid;
if (query_attrs)
req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
else
@@ -3677,8 +3672,8 @@ SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
if (rc)
return rc;
- req->PersistentFileId = cpu_to_le64(persistent_fid);
- req->VolatileFileId = cpu_to_le64(volatile_fid);
+ req->PersistentFileId = persistent_fid;
+ req->VolatileFileId = volatile_fid;
/* See note 354 of MS-SMB2, 64K max */
req->OutputBufferLength =
cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
@@ -3858,12 +3853,14 @@ void smb2_reconnect_server(struct work_struct *work)
tcon = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
if (!tcon) {
resched = true;
- list_del_init(&ses->rlist);
- cifs_put_smb_ses(ses);
+ list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+ list_del_init(&ses->rlist);
+ cifs_put_smb_ses(ses);
+ }
goto done;
}
- tcon->tidStatus = CifsGood;
+ tcon->status = TID_GOOD;
tcon->retry = false;
tcon->need_reconnect = false;
@@ -3951,8 +3948,8 @@ SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
if (rc)
return rc;
- req->PersistentFileId = cpu_to_le64(persistent_fid);
- req->VolatileFileId = cpu_to_le64(volatile_fid);
+ req->PersistentFileId = persistent_fid;
+ req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
@@ -4033,8 +4030,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
shdr = &req->hdr;
shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
- req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
- req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
+ req->PersistentFileId = io_parms->persistent_fid;
+ req->VolatileFileId = io_parms->volatile_fid;
req->ReadChannelInfoOffset = 0; /* reserved */
req->ReadChannelInfoLength = 0; /* reserved */
req->Channel = 0; /* reserved */
@@ -4094,8 +4091,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
*/
shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
- req->PersistentFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
- req->VolatileFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
+ req->PersistentFileId = (u64)-1;
+ req->VolatileFileId = (u64)-1;
}
}
if (remaining_bytes > io_parms->length)
@@ -4307,21 +4304,19 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
cifs_dbg(VFS, "Send error in read = %d\n", rc);
trace_smb3_read_err(xid,
- le64_to_cpu(req->PersistentFileId),
+ req->PersistentFileId,
io_parms->tcon->tid, ses->Suid,
io_parms->offset, io_parms->length,
rc);
} else
- trace_smb3_read_done(xid,
- le64_to_cpu(req->PersistentFileId),
- io_parms->tcon->tid, ses->Suid,
- io_parms->offset, 0);
+ trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid,
+ ses->Suid, io_parms->offset, 0);
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
cifs_small_buf_release(req);
return rc == -ENODATA ? 0 : rc;
} else
trace_smb3_read_done(xid,
- le64_to_cpu(req->PersistentFileId),
+ req->PersistentFileId,
io_parms->tcon->tid, ses->Suid,
io_parms->offset, io_parms->length);
@@ -4463,8 +4458,8 @@ smb2_async_writev(struct cifs_writedata *wdata,
shdr = (struct smb2_hdr *)req;
shdr->Id.SyncId.ProcessId = cpu_to_le32(wdata->cfile->pid);
- req->PersistentFileId = cpu_to_le64(wdata->cfile->fid.persistent_fid);
- req->VolatileFileId = cpu_to_le64(wdata->cfile->fid.volatile_fid);
+ req->PersistentFileId = wdata->cfile->fid.persistent_fid;
+ req->VolatileFileId = wdata->cfile->fid.volatile_fid;
req->WriteChannelInfoOffset = 0;
req->WriteChannelInfoLength = 0;
req->Channel = 0;
@@ -4562,7 +4557,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
if (rc) {
trace_smb3_write_err(0 /* no xid */,
- le64_to_cpu(req->PersistentFileId),
+ req->PersistentFileId,
tcon->tid, tcon->ses->Suid, wdata->offset,
wdata->bytes, rc);
kref_put(&wdata->refcount, release);
@@ -4615,8 +4610,8 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
- req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
- req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
+ req->PersistentFileId = io_parms->persistent_fid;
+ req->VolatileFileId = io_parms->volatile_fid;
req->WriteChannelInfoOffset = 0;
req->WriteChannelInfoLength = 0;
req->Channel = 0;
@@ -4645,7 +4640,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
if (rc) {
trace_smb3_write_err(xid,
- le64_to_cpu(req->PersistentFileId),
+ req->PersistentFileId,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
io_parms->offset, io_parms->length, rc);
@@ -4654,7 +4649,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
} else {
*nbytes = le32_to_cpu(rsp->DataLength);
trace_smb3_write_done(xid,
- le64_to_cpu(req->PersistentFileId),
+ req->PersistentFileId,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
io_parms->offset, *nbytes);
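PersistentFileId and VolatileFileId are opaque tokens: the client never interprets them, it just echoes back whatever the server issued, so the cpu_to_le64()/le64_to_cpu() round trips removed here were pure noise (the structure comments already say "opaque endianness"). Illustrative:

        /* store and replay verbatim; no byte swapping */
        fid->persistent_fid = rsp->PersistentFileId;
        req->PersistentFileId = fid->persistent_fid;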
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 33cfd0a1adf1..d8c4388b190d 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -56,16 +56,6 @@ struct smb2_rdma_crypto_transform {
#define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL
-#define SMB2_ERROR_STRUCTURE_SIZE2 cpu_to_le16(9)
-
-struct smb2_err_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize;
- __le16 Reserved; /* MBZ */
- __le32 ByteCount; /* even if zero, at least one byte follows */
- __u8 ErrorData[1]; /* variable length */
-} __packed;
-
#define SYMLINK_ERROR_TAG 0x4c4d5953
struct smb2_symlink_err_rsp {
@@ -139,47 +129,6 @@ struct share_redirect_error_context_rsp {
#define SMB2_LEASE_HANDLE_CACHING_HE 0x02
#define SMB2_LEASE_WRITE_CACHING_HE 0x04
-#define SMB2_LEASE_NONE cpu_to_le32(0x00)
-#define SMB2_LEASE_READ_CACHING cpu_to_le32(0x01)
-#define SMB2_LEASE_HANDLE_CACHING cpu_to_le32(0x02)
-#define SMB2_LEASE_WRITE_CACHING cpu_to_le32(0x04)
-
-#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS cpu_to_le32(0x00000002)
-#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET cpu_to_le32(0x00000004)
-
-#define SMB2_LEASE_KEY_SIZE 16
-
-struct lease_context {
- u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
- __le32 LeaseState;
- __le32 LeaseFlags;
- __le64 LeaseDuration;
-} __packed;
-
-struct lease_context_v2 {
- u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
- __le32 LeaseState;
- __le32 LeaseFlags;
- __le64 LeaseDuration;
- __le64 ParentLeaseKeyLow;
- __le64 ParentLeaseKeyHigh;
- __le16 Epoch;
- __le16 Reserved;
-} __packed;
-
-struct create_lease {
- struct create_context ccontext;
- __u8 Name[8];
- struct lease_context lcontext;
-} __packed;
-
-struct create_lease_v2 {
- struct create_context ccontext;
- __u8 Name[8];
- struct lease_context_v2 lcontext;
- __u8 Pad[4];
-} __packed;
-
struct create_durable {
struct create_context ccontext;
__u8 Name[8];
@@ -192,13 +141,6 @@ struct create_durable {
} Data;
} __packed;
-struct create_posix {
- struct create_context ccontext;
- __u8 Name[16];
- __le32 Mode;
- __u32 Reserved;
-} __packed;
-
/* See MS-SMB2 2.2.13.2.11 */
/* Flags */
#define SMB2_DHANDLE_FLAG_PERSISTENT 0x00000002
@@ -287,12 +229,6 @@ struct copychunk_ioctl {
__u32 Reserved2;
} __packed;
-/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
-struct file_zero_data_information {
- __le64 FileOffset;
- __le64 BeyondFinalZero;
-} __packed;
-
struct copychunk_ioctl_rsp {
__le32 ChunksWritten;
__le32 ChunkBytesWritten;
@@ -338,11 +274,6 @@ struct fsctl_get_integrity_information_rsp {
__le32 ClusterSizeInBytes;
} __packed;
-struct file_allocated_range_buffer {
- __le64 file_offset;
- __le64 length;
-} __packed;
-
/* Integrity ChecksumAlgorithm choices for above */
#define CHECKSUM_TYPE_NONE 0x0000
#define CHECKSUM_TYPE_CRC64 0x0002
@@ -351,53 +282,6 @@ struct file_allocated_range_buffer {
/* Integrity flags for above */
#define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF 0x00000001
-/* Reparse structures - see MS-FSCC 2.1.2 */
-
-/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
-
-struct reparse_data_buffer {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __u8 DataBuffer[]; /* Variable Length */
-} __packed;
-
-struct reparse_guid_data_buffer {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __u8 ReparseGuid[16];
- __u8 DataBuffer[]; /* Variable Length */
-} __packed;
-
-struct reparse_mount_point_data_buffer {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __le16 SubstituteNameOffset;
- __le16 SubstituteNameLength;
- __le16 PrintNameOffset;
- __le16 PrintNameLength;
- __u8 PathBuffer[]; /* Variable Length */
-} __packed;
-
-#define SYMLINK_FLAG_RELATIVE 0x00000001
-
-struct reparse_symlink_data_buffer {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __le16 SubstituteNameOffset;
- __le16 SubstituteNameLength;
- __le16 PrintNameOffset;
- __le16 PrintNameLength;
- __le32 Flags;
- __u8 PathBuffer[]; /* Variable Length */
-} __packed;
-
-/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
-
-
/* See MS-DFSC 2.2.2 */
struct fsctl_get_dfs_referral_req {
__le16 MaxReferralLevel;
@@ -413,22 +297,6 @@ struct network_resiliency_req {
} __packed;
/* There is no buffer for the response ie no struct network_resiliency_rsp */
-
-struct validate_negotiate_info_req {
- __le32 Capabilities;
- __u8 Guid[SMB2_CLIENT_GUID_SIZE];
- __le16 SecurityMode;
- __le16 DialectCount;
- __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
-} __packed;
-
-struct validate_negotiate_info_rsp {
- __le32 Capabilities;
- __u8 Guid[SMB2_CLIENT_GUID_SIZE];
- __le16 SecurityMode;
- __le16 Dialect; /* Dialect in use for the connection */
-} __packed;
-
#define RSS_CAPABLE cpu_to_le32(0x00000001)
#define RDMA_CAPABLE cpu_to_le32(0x00000002)
@@ -464,14 +332,6 @@ struct compress_ioctl {
__le16 CompressionState; /* See cifspdu.h for possible flag values */
} __packed;
-struct duplicate_extents_to_file {
- __u64 PersistentFileHandle; /* source file handle, opaque endianness */
- __u64 VolatileFileHandle;
- __le64 SourceFileOffset;
- __le64 TargetFileOffset;
- __le64 ByteCount; /* Bytes to be copied */
-} __packed;
-
/*
* Maximum number of iovs we need for an ioctl request.
* [0] : struct smb2_ioctl_req
@@ -479,370 +339,11 @@ struct duplicate_extents_to_file {
*/
#define SMB2_IOCTL_IOV_SIZE 2
-struct smb2_ioctl_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 57 */
- __u16 Reserved;
- __le32 CtlCode;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- __le32 InputOffset;
- __le32 InputCount;
- __le32 MaxInputResponse;
- __le32 OutputOffset;
- __le32 OutputCount;
- __le32 MaxOutputResponse;
- __le32 Flags;
- __u32 Reserved2;
- __u8 Buffer[];
-} __packed;
-
-struct smb2_ioctl_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 57 */
- __u16 Reserved;
- __le32 CtlCode;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- __le32 InputOffset;
- __le32 InputCount;
- __le32 OutputOffset;
- __le32 OutputCount;
- __le32 Flags;
- __u32 Reserved2;
- /* char * buffer[] */
-} __packed;
-
-#define SMB2_LOCKFLAG_SHARED_LOCK 0x0001
-#define SMB2_LOCKFLAG_EXCLUSIVE_LOCK 0x0002
-#define SMB2_LOCKFLAG_UNLOCK 0x0004
-#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
-
-struct smb2_lock_element {
- __le64 Offset;
- __le64 Length;
- __le32 Flags;
- __le32 Reserved;
-} __packed;
-
-struct smb2_lock_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 48 */
- __le16 LockCount;
- /*
- * The least significant four bits are the index, the other 28 bits are
- * the lock sequence number (0 to 64). See MS-SMB2 2.2.26
- */
- __le32 LockSequenceNumber;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- /* Followed by at least one */
- struct smb2_lock_element locks[1];
-} __packed;
-
-struct smb2_lock_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_echo_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __u16 Reserved;
-} __packed;
-
-struct smb2_echo_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __u16 Reserved;
-} __packed;
-
-/* search (query_directory) Flags field */
-#define SMB2_RESTART_SCANS 0x01
-#define SMB2_RETURN_SINGLE_ENTRY 0x02
-#define SMB2_INDEX_SPECIFIED 0x04
-#define SMB2_REOPEN 0x10
-
-#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
-
-/*
- * Valid FileInformation classes.
- *
- * Note that these are a subset of the (file) QUERY_INFO levels defined
- * later in this file (but since QUERY_DIRECTORY uses equivalent numbers
- * we do not redefine them here)
- *
- * FileDirectoryInfomation 0x01
- * FileFullDirectoryInformation 0x02
- * FileIdFullDirectoryInformation 0x26
- * FileBothDirectoryInformation 0x03
- * FileIdBothDirectoryInformation 0x25
- * FileNamesInformation 0x0C
- * FileIdExtdDirectoryInformation 0x3C
- */
-
-struct smb2_query_directory_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 33 */
- __u8 FileInformationClass;
- __u8 Flags;
- __le32 FileIndex;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- __le16 FileNameOffset;
- __le16 FileNameLength;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_query_directory_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-/* Possible InfoType values */
-#define SMB2_O_INFO_FILE 0x01
-#define SMB2_O_INFO_FILESYSTEM 0x02
-#define SMB2_O_INFO_SECURITY 0x03
-#define SMB2_O_INFO_QUOTA 0x04
-
-/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */
-#define OWNER_SECINFO 0x00000001
-#define GROUP_SECINFO 0x00000002
-#define DACL_SECINFO 0x00000004
-#define SACL_SECINFO 0x00000008
-#define LABEL_SECINFO 0x00000010
-#define ATTRIBUTE_SECINFO 0x00000020
-#define SCOPE_SECINFO 0x00000040
-#define BACKUP_SECINFO 0x00010000
-#define UNPROTECTED_SACL_SECINFO 0x10000000
-#define UNPROTECTED_DACL_SECINFO 0x20000000
-#define PROTECTED_SACL_SECINFO 0x40000000
-#define PROTECTED_DACL_SECINFO 0x80000000
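For illustration only (not from this patch): a security query for the owner, group and DACL of a file would OR the flags above into the request's AdditionalInformation field, along these lines:

#include <stdint.h>

/* Hypothetical helper: request owner + group + DACL in one query. */
static inline uint32_t secinfo_owner_group_dacl(void)
{
	return OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO;
}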
-
-/* Flags used for FileFullEAinfo */
-#define SL_RESTART_SCAN 0x00000001
-#define SL_RETURN_SINGLE_ENTRY 0x00000002
-#define SL_INDEX_SPECIFIED 0x00000004
-
-struct smb2_query_info_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 41 */
- __u8 InfoType;
- __u8 FileInfoClass;
- __le32 OutputBufferLength;
- __le16 InputBufferOffset;
- __u16 Reserved;
- __le32 InputBufferLength;
- __le32 AdditionalInformation;
- __le32 Flags;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_query_info_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-/*
- * Maximum number of iovs we need for a set-info request.
- * The largest one is rename/hardlink
- * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
- * [1] : path
- * [2] : compound padding
- */
-#define SMB2_SET_INFO_IOV_SIZE 3
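For orientation, a sketch of how those three iovs line up for a rename (hypothetical variable names; the real request assembly lives in the set-info paths elsewhere in cifs):

struct kvec iov[SMB2_SET_INFO_IOV_SIZE] = {
	{ .iov_base = req,  .iov_len = req_len  },  /* [0] set_info req + rename info */
	{ .iov_base = path, .iov_len = path_len },  /* [1] UTF-16 target path */
	{ .iov_base = pad,  .iov_len = pad_len  },  /* [2] compound padding */
};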
-
-struct smb2_set_info_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 33 */
- __u8 InfoType;
- __u8 FileInfoClass;
- __le32 BufferLength;
- __le16 BufferOffset;
- __u16 Reserved;
- __le32 AdditionalInformation;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_set_info_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 2 */
-} __packed;
-
-struct smb2_oplock_break {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 24 */
- __u8 OplockLevel;
- __u8 Reserved;
- __le32 Reserved2;
- __u64 PersistentFid;
- __u64 VolatileFid;
-} __packed;
-
-#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
-
-struct smb2_lease_break {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 44 */
- __le16 Epoch;
- __le32 Flags;
- __u8 LeaseKey[16];
- __le32 CurrentLeaseState;
- __le32 NewLeaseState;
- __le32 BreakReason;
- __le32 AccessMaskHint;
- __le32 ShareMaskHint;
-} __packed;
-
-struct smb2_lease_ack {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 36 */
- __le16 Reserved;
- __le32 Flags;
- __u8 LeaseKey[16];
- __le32 LeaseState;
- __le64 LeaseDuration;
-} __packed;
-
/*
- * PDU infolevel structure definitions
+ * PDU query infolevel structure definitions
* BB consider moving to a different header
*/
-/* File System Information Classes */
-#define FS_VOLUME_INFORMATION 1 /* Query */
-#define FS_LABEL_INFORMATION 2 /* Local only */
-#define FS_SIZE_INFORMATION 3 /* Query */
-#define FS_DEVICE_INFORMATION 4 /* Query */
-#define FS_ATTRIBUTE_INFORMATION 5 /* Query */
-#define FS_CONTROL_INFORMATION 6 /* Query, Set */
-#define FS_FULL_SIZE_INFORMATION 7 /* Query */
-#define FS_OBJECT_ID_INFORMATION 8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION 9 /* Local only */
-#define FS_VOLUME_FLAGS_INFORMATION 10 /* Local only */
-#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */
-#define FS_POSIX_INFORMATION 100 /* SMB3.1.1 POSIX. Query */
-
-struct smb2_fs_full_size_info {
- __le64 TotalAllocationUnits;
- __le64 CallerAvailableAllocationUnits;
- __le64 ActualAvailableAllocationUnits;
- __le32 SectorsPerAllocationUnit;
- __le32 BytesPerSector;
-} __packed;
-
-#define SSINFO_FLAGS_ALIGNED_DEVICE 0x00000001
-#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
-#define SSINFO_FLAGS_NO_SEEK_PENALTY 0x00000004
-#define SSINFO_FLAGS_TRIM_ENABLED 0x00000008
-
-/* sector size info struct */
-struct smb3_fs_ss_info {
- __le32 LogicalBytesPerSector;
- __le32 PhysicalBytesPerSectorForAtomicity;
- __le32 PhysicalBytesPerSectorForPerf;
- __le32 FileSystemEffectivePhysicalBytesPerSectorForAtomicity;
- __le32 Flags;
- __le32 ByteOffsetForSectorAlignment;
- __le32 ByteOffsetForPartitionAlignment;
-} __packed;
-
-/* volume info struct - see MS-FSCC 2.5.9 */
-#define MAX_VOL_LABEL_LEN 32
-struct smb3_fs_vol_info {
- __le64 VolumeCreationTime;
- __u32 VolumeSerialNumber;
- __le32 VolumeLabelLength; /* includes trailing null */
- __u8 SupportsObjects; /* True if fs supports objects (e.g. NTFS) */
- __u8 Reserved;
- __u8 VolumeLabel[]; /* variable len */
-} __packed;
-
-/* partial list of QUERY INFO levels */
-#define FILE_DIRECTORY_INFORMATION 1
-#define FILE_FULL_DIRECTORY_INFORMATION 2
-#define FILE_BOTH_DIRECTORY_INFORMATION 3
-#define FILE_BASIC_INFORMATION 4
-#define FILE_STANDARD_INFORMATION 5
-#define FILE_INTERNAL_INFORMATION 6
-#define FILE_EA_INFORMATION 7
-#define FILE_ACCESS_INFORMATION 8
-#define FILE_NAME_INFORMATION 9
-#define FILE_RENAME_INFORMATION 10
-#define FILE_LINK_INFORMATION 11
-#define FILE_NAMES_INFORMATION 12
-#define FILE_DISPOSITION_INFORMATION 13
-#define FILE_POSITION_INFORMATION 14
-#define FILE_FULL_EA_INFORMATION 15
-#define FILE_MODE_INFORMATION 16
-#define FILE_ALIGNMENT_INFORMATION 17
-#define FILE_ALL_INFORMATION 18
-#define FILE_ALLOCATION_INFORMATION 19
-#define FILE_END_OF_FILE_INFORMATION 20
-#define FILE_ALTERNATE_NAME_INFORMATION 21
-#define FILE_STREAM_INFORMATION 22
-#define FILE_PIPE_INFORMATION 23
-#define FILE_PIPE_LOCAL_INFORMATION 24
-#define FILE_PIPE_REMOTE_INFORMATION 25
-#define FILE_MAILSLOT_QUERY_INFORMATION 26
-#define FILE_MAILSLOT_SET_INFORMATION 27
-#define FILE_COMPRESSION_INFORMATION 28
-#define FILE_OBJECT_ID_INFORMATION 29
-/* Number 30 not defined in documents */
-#define FILE_MOVE_CLUSTER_INFORMATION 31
-#define FILE_QUOTA_INFORMATION 32
-#define FILE_REPARSE_POINT_INFORMATION 33
-#define FILE_NETWORK_OPEN_INFORMATION 34
-#define FILE_ATTRIBUTE_TAG_INFORMATION 35
-#define FILE_TRACKING_INFORMATION 36
-#define FILEID_BOTH_DIRECTORY_INFORMATION 37
-#define FILEID_FULL_DIRECTORY_INFORMATION 38
-#define FILE_VALID_DATA_LENGTH_INFORMATION 39
-#define FILE_SHORT_NAME_INFORMATION 40
-#define FILE_SFIO_RESERVE_INFORMATION 44
-#define FILE_SFIO_VOLUME_INFORMATION 45
-#define FILE_HARD_LINK_INFORMATION 46
-#define FILE_NORMALIZED_NAME_INFORMATION 48
-#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
-#define FILE_STANDARD_LINK_INFORMATION 54
-#define FILE_ID_INFORMATION 59
-#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60
-
-struct smb2_file_internal_info {
- __le64 IndexNumber;
-} __packed; /* level 6 Query */
-
-struct smb2_file_rename_info { /* encoding of request for level 10 */
- __u8 ReplaceIfExists; /* 1 = replace existing target with new */
- /* 0 = fail if target already exists */
- __u8 Reserved[7];
- __u64 RootDirectory; /* MBZ for network operations (spec doesn't say why) */
- __le32 FileNameLength;
- char FileName[]; /* New name to be assigned */
- /* padding - overall struct size must be >= 24 so filename + pad >= 6 */
-} __packed; /* level 10 Set */
-
-struct smb2_file_link_info { /* encoding of request for level 11 */
- __u8 ReplaceIfExists; /* 1 = replace existing link with new */
- /* 0 = fail if link already exists */
- __u8 Reserved[7];
- __u64 RootDirectory; /* MBZ for network operations (spec doesn't say why) */
- __le32 FileNameLength;
- char FileName[]; /* Name to be assigned to new link */
-} __packed; /* level 11 Set */
-
struct smb2_file_full_ea_info { /* encoding of response for level 15 */
__le32 next_entry_offset;
__u8 flags;
@@ -851,38 +352,6 @@ struct smb2_file_full_ea_info { /* encoding of response for level 15 */
char ea_data[]; /* \0 terminated name plus value */
} __packed; /* level 15 Set */
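The on-the-wire EA list is a chain of variable-length records linked by next_entry_offset; a minimal standalone walker under that assumption (illustrative only, host-order fields for brevity where the wire format is little-endian):

#include <stddef.h>
#include <stdint.h>

struct ea_record {               /* mirrors smb2_file_full_ea_info's layout */
	uint32_t next_entry_offset;
	uint8_t  flags;
	uint8_t  ea_name_length;
	uint16_t ea_value_length;
	char     ea_data[];       /* '\0'-terminated name, then the value */
};

/* Visit each record; next_entry_offset == 0 marks the last one. */
static void walk_ea_list(const char *buf,
			 void (*visit)(const struct ea_record *))
{
	const struct ea_record *ea = (const struct ea_record *)buf;

	for (;;) {
		visit(ea);
		if (!ea->next_entry_offset)
			break;
		ea = (const struct ea_record *)((const char *)ea +
						ea->next_entry_offset);
	}
}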
-/*
- * Although the struct shares its name with the cifs level 0x107 one, this
- * level 18 layout is different: level 0x107 has an extra u64 between
- * AccessFlags and CurrentByteOffset.
- */
-struct smb2_file_all_info { /* data block encoding of response to level 18 */
- __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le32 Attributes;
- __u32 Pad1; /* End of FILE_BASIC_INFO equivalent */
- __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
- __le64 EndOfFile; /* size, i.e. offset to first free byte in file */
- __le32 NumberOfLinks; /* hard links */
- __u8 DeletePending;
- __u8 Directory;
- __u16 Pad2; /* End of FILE_STANDARD_INFO equivalent */
- __le64 IndexNumber;
- __le32 EASize;
- __le32 AccessFlags;
- __le64 CurrentByteOffset;
- __le32 Mode;
- __le32 AlignmentRequirement;
- __le32 FileNameLength;
- char FileName[1];
-} __packed; /* level 18 Query */
-
-struct smb2_file_eof_info { /* encoding of request for level 20 */
- __le64 EndOfFile; /* new end of file value */
-} __packed; /* level 20 Set */
-
struct smb2_file_reparse_point_info {
__le64 IndexNumber;
__le32 Tag;
@@ -935,6 +404,8 @@ struct create_posix_rsp {
struct cifs_sid group; /* var-sized on the wire */
} __packed;
+#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
+
/*
* SMB2-only POSIX info level for query dir
*
@@ -966,31 +437,6 @@ struct smb2_posix_info {
*/
} __packed;
-/* Level 100 query info */
-struct smb311_posix_qinfo {
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 DosAttributes;
- __le64 Inode;
- __le32 DeviceId;
- __le32 Zero;
- /* beginning of POSIX Create Context Response */
- __le32 HardLinks;
- __le32 ReparseTag;
- __le32 Mode;
- u8 Sids[];
- /*
- * var sized owner SID
- * var sized group SID
- * le32 filenamelength
- * u8 filename[]
- */
-} __packed;
-
/*
* Parsed version of the above struct. Allows direct access to the
* variable length fields
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 4a7062fd1c26..a69f1eed1cfe 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -283,7 +283,7 @@ extern int smb311_update_preauth_hash(struct cifs_ses *ses,
struct kvec *iov, int nvec);
extern int smb2_query_info_compound(const unsigned int xid,
struct cifs_tcon *tcon,
- __le16 *utf16_path, u32 desired_access,
+ const char *path, u32 desired_access,
u32 class, u32 type, u32 output_len,
struct kvec *rsp, int *buftype,
struct cifs_sb_info *cifs_sb);
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 4fcca79f39ae..526a4c1bed99 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -248,7 +248,7 @@ EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
* which must still be locked and not uptodate. Normally, blocksize ==
* PAGE_SIZE and the whole page is decrypted at once.
*
- * This is for use by the filesystem's ->readpages() method.
+ * This is for use by the filesystem's ->readahead() method.
*
* Return: 0 on success; -errno on failure
*/
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index 619e5b4bed10..c6800b880920 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -203,7 +203,8 @@ struct exfat_mount_options {
/* on error: continue, panic, remount-ro */
enum exfat_error_mode errors;
unsigned utf8:1, /* Use of UTF-8 character set */
- discard:1; /* Issue discard requests on deletions */
+ discard:1, /* Issue discard requests on deletions */
+ keep_last_dots:1; /* Keep trailing periods in paths */
int time_offset; /* Offset of timestamps from UTC (in minutes) */
};
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index d890fd34bb2d..2f5130059236 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -218,8 +218,6 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
if (exfat_free_cluster(inode, &clu))
return -EIO;
- exfat_clear_volume_dirty(sb);
-
return 0;
}
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index af4eb39cc0c3..a02a04a993bf 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -65,11 +65,14 @@ static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags)
return ret;
}
-/* returns the length of a struct qstr, ignoring trailing dots */
-static unsigned int exfat_striptail_len(unsigned int len, const char *name)
+/* returns the length of a struct qstr, ignoring trailing dots unless keep_last_dots is set */
+static unsigned int exfat_striptail_len(unsigned int len, const char *name,
+ bool keep_last_dots)
{
- while (len && name[len - 1] == '.')
- len--;
+ if (!keep_last_dots) {
+ while (len && name[len - 1] == '.')
+ len--;
+ }
return len;
}
@@ -83,7 +86,8 @@ static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
struct super_block *sb = dentry->d_sb;
struct nls_table *t = EXFAT_SB(sb)->nls_io;
const unsigned char *name = qstr->name;
- unsigned int len = exfat_striptail_len(qstr->len, qstr->name);
+ unsigned int len = exfat_striptail_len(qstr->len, qstr->name,
+ EXFAT_SB(sb)->options.keep_last_dots);
unsigned long hash = init_name_hash(dentry);
int i, charlen;
wchar_t c;
@@ -104,8 +108,10 @@ static int exfat_d_cmp(const struct dentry *dentry, unsigned int len,
{
struct super_block *sb = dentry->d_sb;
struct nls_table *t = EXFAT_SB(sb)->nls_io;
- unsigned int alen = exfat_striptail_len(name->len, name->name);
- unsigned int blen = exfat_striptail_len(len, str);
+ unsigned int alen = exfat_striptail_len(name->len, name->name,
+ EXFAT_SB(sb)->options.keep_last_dots);
+ unsigned int blen = exfat_striptail_len(len, str,
+ EXFAT_SB(sb)->options.keep_last_dots);
wchar_t c1, c2;
int charlen, i;
@@ -136,7 +142,8 @@ static int exfat_utf8_d_hash(const struct dentry *dentry, struct qstr *qstr)
{
struct super_block *sb = dentry->d_sb;
const unsigned char *name = qstr->name;
- unsigned int len = exfat_striptail_len(qstr->len, qstr->name);
+ unsigned int len = exfat_striptail_len(qstr->len, qstr->name,
+ EXFAT_SB(sb)->options.keep_last_dots);
unsigned long hash = init_name_hash(dentry);
int i, charlen;
unicode_t u;
@@ -161,8 +168,11 @@ static int exfat_utf8_d_cmp(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name)
{
struct super_block *sb = dentry->d_sb;
- unsigned int alen = exfat_striptail_len(name->len, name->name);
- unsigned int blen = exfat_striptail_len(len, str);
+ unsigned int alen = exfat_striptail_len(name->len, name->name,
+ EXFAT_SB(sb)->options.keep_last_dots);
+ unsigned int blen = exfat_striptail_len(len, str,
+ EXFAT_SB(sb)->options.keep_last_dots);
+
unicode_t u_a, u_b;
int charlen, i;
@@ -416,13 +426,25 @@ static int __exfat_resolve_path(struct inode *inode, const unsigned char *path,
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
+ int pathlen = strlen(path);
- /* strip all trailing periods */
- namelen = exfat_striptail_len(strlen(path), path);
+ /*
+ * get the length of the pathname excluding
+ * trailing periods, if any.
+ */
+ namelen = exfat_striptail_len(pathlen, path, false);
+ if (EXFAT_SB(sb)->options.keep_last_dots) {
+ /*
+ * Do not allow the creation of files with names
+ * ending with period(s).
+ */
+ if (!lookup && (namelen < pathlen))
+ return -EINVAL;
+ namelen = pathlen;
+ }
if (!namelen)
return -ENOENT;
-
- if (strlen(path) > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
+ if (pathlen > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
return -ENAMETOOLONG;
/*
@@ -554,7 +576,6 @@ static int exfat_create(struct user_namespace *mnt_userns, struct inode *dir,
exfat_set_volume_dirty(sb);
err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE,
&info);
- exfat_clear_volume_dirty(sb);
if (err)
goto unlock;
@@ -812,7 +833,6 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
/* This doesn't modify ei */
ei->dir.dir = DIR_DELETED;
- exfat_clear_volume_dirty(sb);
inode_inc_iversion(dir);
dir->i_mtime = dir->i_atime = current_time(dir);
@@ -846,7 +866,6 @@ static int exfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
exfat_set_volume_dirty(sb);
err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR,
&info);
- exfat_clear_volume_dirty(sb);
if (err)
goto unlock;
@@ -976,7 +995,6 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
goto unlock;
}
ei->dir.dir = DIR_DELETED;
- exfat_clear_volume_dirty(sb);
inode_inc_iversion(dir);
dir->i_mtime = dir->i_atime = current_time(dir);
@@ -1311,7 +1329,6 @@ del_out:
*/
new_ei->dir.dir = DIR_DELETED;
}
- exfat_clear_volume_dirty(sb);
out:
return ret;
}
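To summarize the semantics the new option introduces above (illustrative comment with hypothetical calls, not code from this patch):

/*
 *   default:         open("foo.")  resolves the entry stored as "foo"
 *                    (trailing dots are stripped for hash/compare/lookup)
 *   keep_last_dots:  open("foo.")  looks up the literal name "foo."
 *                    creat("foo.") fails with -EINVAL, since a name that
 *                    ends in a period cannot be created faithfully
 */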
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 9f892903419a..8ca21e7917d1 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -100,7 +100,6 @@ static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data;
- bool sync;
/* retain persistent-flags */
new_flags |= sbi->vol_flags_persistent;
@@ -119,16 +118,11 @@ static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags)
p_boot->vol_flags = cpu_to_le16(new_flags);
- if ((new_flags & VOLUME_DIRTY) && !buffer_dirty(sbi->boot_bh))
- sync = true;
- else
- sync = false;
-
set_buffer_uptodate(sbi->boot_bh);
mark_buffer_dirty(sbi->boot_bh);
- if (sync)
- sync_dirty_buffer(sbi->boot_bh);
+ __sync_dirty_buffer(sbi->boot_bh, REQ_SYNC | REQ_FUA | REQ_PREFLUSH);
+
return 0;
}
@@ -174,6 +168,8 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",errors=remount-ro");
if (opts->discard)
seq_puts(m, ",discard");
+ if (opts->keep_last_dots)
+ seq_puts(m, ",keep_last_dots");
if (opts->time_offset)
seq_printf(m, ",time_offset=%d", opts->time_offset);
return 0;
@@ -217,6 +213,7 @@ enum {
Opt_charset,
Opt_errors,
Opt_discard,
+ Opt_keep_last_dots,
Opt_time_offset,
/* Deprecated options */
@@ -243,6 +240,7 @@ static const struct fs_parameter_spec exfat_parameters[] = {
fsparam_string("iocharset", Opt_charset),
fsparam_enum("errors", Opt_errors, exfat_param_enums),
fsparam_flag("discard", Opt_discard),
+ fsparam_flag("keep_last_dots", Opt_keep_last_dots),
fsparam_s32("time_offset", Opt_time_offset),
__fsparam(NULL, "utf8", Opt_utf8, fs_param_deprecated,
NULL),
@@ -297,6 +295,9 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_discard:
opts->discard = 1;
break;
+ case Opt_keep_last_dots:
+ opts->keep_last_dots = 1;
+ break;
case Opt_time_offset:
/*
* Make the limit 24 just in case someone invents something
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8bd66cdc41be..6feb07e3e1eb 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -267,7 +267,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
goto out;
current->backing_dev_info = inode_to_bdi(inode);
- ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
+ ret = generic_perform_write(iocb, from);
current->backing_dev_info = NULL;
out:
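For context, the callee's signature presumably changed along these lines, with the write position now taken from iocb->ki_pos instead of being passed separately (sketch of the assumed prototype, not part of this hunk):

ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i);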
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1ce13f69fbec..13740f2d0e61 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3589,7 +3589,7 @@ const struct iomap_ops ext4_iomap_report_ops = {
static bool ext4_journalled_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
- WARN_ON_ONCE(!page_has_buffers(&folio->page));
+ WARN_ON_ONCE(!folio_buffers(folio));
folio_set_checked(folio);
return filemap_dirty_folio(mapping, folio);
}
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 1aa26d6634fc..af491e170c4a 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -109,7 +109,7 @@ static void verity_work(struct work_struct *work)
struct bio *bio = ctx->bio;
/*
- * fsverity_verify_bio() may call readpages() again, and although verity
+ * fsverity_verify_bio() may call readahead() again, and although verity
* will be disabled for that, decryption may still be needed, causing
* another bio_post_read_ctx to be allocated. So to guarantee that
* mempool_alloc() never deadlocks we must free the current ctx first.
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index a8fc4fa511a8..f5366feea82d 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -456,7 +456,7 @@ static bool f2fs_dirty_meta_folio(struct address_space *mapping,
folio_mark_uptodate(folio);
if (!folio_test_dirty(folio)) {
filemap_dirty_folio(mapping, folio);
- inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_META);
+ inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META);
set_page_private_reference(&folio->page);
return true;
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f8fcbe91059b..8e0c2e773c8d 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -164,7 +164,7 @@ static void f2fs_verify_bio(struct work_struct *work)
bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
/*
- * fsverity_verify_bio() may call readpages() again, and while verity
+ * fsverity_verify_bio() may call readahead() again, and while verity
* will be disabled for this, decryption and/or decompression may still
* be needed, resulting in another bio_post_read_ctx being allocated.
* So to prevent deadlocks we need to release the current ctx to the
@@ -2392,7 +2392,7 @@ static void f2fs_readahead(struct readahead_control *rac)
if (!f2fs_is_compress_backend_ready(inode))
return;
- /* If the file has inline data, skip readpages */
+ /* If the file has inline data, skip readahead */
if (f2fs_has_inline_data(inode))
return;
@@ -3571,7 +3571,7 @@ static bool f2fs_dirty_data_folio(struct address_space *mapping,
f2fs_update_dirty_folio(inode, folio);
return true;
}
- return true;
+ return false;
}
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index d3f39a704b8b..5b89af0f27f0 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -4448,7 +4448,7 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
return -EOPNOTSUPP;
current->backing_dev_info = inode_to_bdi(inode);
- ret = generic_perform_write(file, from, iocb->ki_pos);
+ ret = generic_perform_write(iocb, from);
current->backing_dev_info = NULL;
if (ret > 0) {
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 0b6e741e94a0..c45d341dcf6e 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2146,11 +2146,11 @@ static bool f2fs_dirty_node_folio(struct address_space *mapping,
folio_mark_uptodate(folio);
#ifdef CONFIG_F2FS_CHECK_FS
if (IS_INODE(&folio->page))
- f2fs_inode_chksum_set(F2FS_P_SB(&folio->page), &folio->page);
+ f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
#endif
if (!folio_test_dirty(folio)) {
filemap_dirty_folio(mapping, folio);
- inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_NODES);
+ inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
set_page_private_reference(&folio->page);
return true;
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index eac4984cc753..488b460e046f 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -627,7 +627,7 @@ struct fuse_conn {
/** Connection successful. Only set in INIT */
unsigned conn_init:1;
- /** Do readpages asynchronously? Only set in INIT */
+ /** Do readahead asynchronously? Only set in INIT */
unsigned async_read:1;
/** Return an unique read error after abort. Only set in INIT */
diff --git a/fs/internal.h b/fs/internal.h
index fb2c2ea807d7..08503dc68d2b 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -74,7 +74,7 @@ int do_linkat(int olddfd, struct filename *old, int newdfd,
* namespace.c
*/
extern struct vfsmount *lookup_mnt(const struct path *);
-extern int finish_automount(struct vfsmount *, struct path *);
+extern int finish_automount(struct vfsmount *, const struct path *);
extern int sb_prepare_remount_readonly(struct super_block *);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b94d57c1b0e5..a8413f006417 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -611,6 +611,7 @@ struct io_sr_msg {
int msg_flags;
int bgid;
size_t len;
+ size_t done_io;
};
struct io_open {
@@ -781,6 +782,7 @@ enum {
REQ_F_SKIP_LINK_CQES_BIT,
REQ_F_SINGLE_POLL_BIT,
REQ_F_DOUBLE_POLL_BIT,
+ REQ_F_PARTIAL_IO_BIT,
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
@@ -843,6 +845,8 @@ enum {
REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT),
/* double poll may active */
REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT),
+ /* request has already done partial IO */
+ REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
};
struct async_poll {
@@ -923,7 +927,6 @@ struct io_kiocb {
struct io_wq_work_node comp_list;
atomic_t refs;
atomic_t poll_refs;
- struct io_kiocb *link;
struct io_task_work io_task_work;
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
struct hlist_node hash_node;
@@ -931,9 +934,11 @@ struct io_kiocb {
struct async_poll *apoll;
/* opcode allocated if it needs to store data for async defer */
void *async_data;
- /* custom credentials, valid IFF REQ_F_CREDS is set */
/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
struct io_buffer *kbuf;
+ /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+ struct io_kiocb *link;
+ /* custom credentials, valid IFF REQ_F_CREDS is set */
const struct cred *creds;
struct io_wq_work work;
};
@@ -962,6 +967,7 @@ struct io_op_def {
/* set if opcode supports polled "wait" */
unsigned pollin : 1;
unsigned pollout : 1;
+ unsigned poll_exclusive : 1;
/* op supports buffer selection */
unsigned buffer_select : 1;
/* do prep async if is going to be punted */
@@ -1056,6 +1062,7 @@ static const struct io_op_def io_op_defs[] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollin = 1,
+ .poll_exclusive = 1,
},
[IORING_OP_ASYNC_CANCEL] = {
.audit_skip = 1,
@@ -1330,6 +1337,8 @@ static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
+ lockdep_assert_held(&req->ctx->completion_lock);
+
if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
return 0;
return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
@@ -1362,6 +1371,8 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req,
cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
spin_unlock(&ctx->completion_lock);
} else {
+ lockdep_assert_held(&req->ctx->uring_lock);
+
cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
}
@@ -1382,7 +1393,7 @@ static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
return NULL;
}
-static void io_kbuf_recycle(struct io_kiocb *req)
+static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer_list *bl;
@@ -1390,6 +1401,12 @@ static void io_kbuf_recycle(struct io_kiocb *req)
if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
return;
+ /* don't recycle if we already did IO to this buffer */
+ if (req->flags & REQ_F_PARTIAL_IO)
+ return;
+
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ mutex_lock(&ctx->uring_lock);
lockdep_assert_held(&ctx->uring_lock);
@@ -1398,6 +1415,9 @@ static void io_kbuf_recycle(struct io_kiocb *req)
list_add(&buf->list, &bl->buf_list);
req->flags &= ~REQ_F_BUFFER_SELECTED;
req->kbuf = NULL;
+
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ mutex_unlock(&ctx->uring_lock);
}
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
@@ -2104,6 +2124,12 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res,
}
}
io_req_put_rsrc(req, ctx);
+ /*
+ * Selected buffer deallocation in io_clean_op() assumes that
+ * we don't hold ->completion_lock. Clean them here to avoid
+ * deadlocks.
+ */
+ io_put_kbuf_comp(req);
io_dismantle_req(req);
io_put_task(req->task, 1);
wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
@@ -2148,7 +2174,7 @@ static inline void io_req_complete(struct io_kiocb *req, s32 res)
static void io_req_complete_failed(struct io_kiocb *req, s32 res)
{
req_set_fail(req);
- io_req_complete_post(req, res, io_put_kbuf(req, 0));
+ io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
}
static void io_req_complete_fail_submit(struct io_kiocb *req)
@@ -2437,6 +2463,8 @@ static void handle_prev_tw_list(struct io_wq_work_node *node,
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_task_work.node);
+ prefetch(container_of(next, struct io_kiocb, io_task_work.node));
+
if (req->ctx != *ctx) {
if (unlikely(!*uring_locked && *ctx))
ctx_commit_and_unlock(*ctx);
@@ -2469,6 +2497,8 @@ static void handle_tw_list(struct io_wq_work_node *node,
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_task_work.node);
+ prefetch(container_of(next, struct io_kiocb, io_task_work.node));
+
if (req->ctx != *ctx) {
ctx_flush_and_put(*ctx, locked);
*ctx = req->ctx;
@@ -2974,8 +3004,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
- if (req->rw.kiocb.ki_flags & IOCB_WRITE)
+ if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
kiocb_end_write(req);
+ fsnotify_modify(req->file);
+ } else {
+ fsnotify_access(req->file);
+ }
if (unlikely(res != req->result)) {
if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
io_rw_should_reissue(req)) {
@@ -4439,9 +4473,6 @@ static int io_msg_ring_prep(struct io_kiocb *req,
sqe->splice_fd_in || sqe->buf_index || sqe->personality))
return -EINVAL;
- if (req->file->f_op != &io_uring_fops)
- return -EBADFD;
-
req->msg.user_data = READ_ONCE(sqe->off);
req->msg.len = READ_ONCE(sqe->len);
return 0;
@@ -4451,14 +4482,18 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *target_ctx;
struct io_msg *msg = &req->msg;
- int ret = -EOVERFLOW;
bool filled;
+ int ret;
+
+ ret = -EBADFD;
+ if (req->file->f_op != &io_uring_fops)
+ goto done;
+ ret = -EOVERFLOW;
target_ctx = req->file->private_data;
spin_lock(&target_ctx->completion_lock);
- filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len,
- IORING_CQE_F_MSG);
+ filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
io_commit_cqring(target_ctx);
spin_unlock(&target_ctx->completion_lock);
@@ -4467,6 +4502,9 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
ret = 0;
}
+done:
+ if (ret < 0)
+ req_set_fail(req);
__io_req_complete(req, issue_flags, ret, 0);
return 0;
}
@@ -4537,6 +4575,8 @@ static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
req->sync.len);
if (ret < 0)
req_set_fail(req);
+ else
+ fsnotify_modify(req->file);
io_req_complete(req, ret);
return 0;
}
@@ -5419,12 +5459,21 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (req->ctx->compat)
sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
+ sr->done_io = 0;
return 0;
}
+static bool io_net_retry(struct socket *sock, int flags)
+{
+ if (!(flags & MSG_WAITALL))
+ return false;
+ return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
+}
+
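The same resume-from-done_io pattern is familiar from userspace; an illustrative standalone analogue (not io_uring code):

#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Keep reading a stream socket until len bytes arrive, resuming from
 * however much a short MSG_WAITALL receive already returned. */
static ssize_t recv_all(int fd, char *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = recv(fd, buf + done, len - done, MSG_WAITALL);
		if (n > 0) {
			done += n;      /* partial data: carry it forward */
			continue;
		}
		if (n < 0 && errno == EINTR)
			continue;       /* interrupted: retry */
		break;                  /* EOF or hard error */
	}
	return done ? (ssize_t)done : -1;
}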
static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr iomsg, *kmsg;
+ struct io_sr_msg *sr = &req->sr_msg;
struct socket *sock;
struct io_buffer *kbuf;
unsigned flags;
@@ -5467,6 +5516,11 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
return io_setup_async_msg(req, kmsg);
if (ret == -ERESTARTSYS)
ret = -EINTR;
+ if (ret > 0 && io_net_retry(sock, flags)) {
+ sr->done_io += ret;
+ req->flags |= REQ_F_PARTIAL_IO;
+ return io_setup_async_msg(req, kmsg);
+ }
req_set_fail(req);
} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
req_set_fail(req);
@@ -5476,6 +5530,10 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
if (kmsg->free_iov)
kfree(kmsg->free_iov);
req->flags &= ~REQ_F_NEED_CLEANUP;
+ if (ret >= 0)
+ ret += sr->done_io;
+ else if (sr->done_io)
+ ret = sr->done_io;
__io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
return 0;
}
@@ -5526,12 +5584,23 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
if (ret == -ERESTARTSYS)
ret = -EINTR;
+ if (ret > 0 && io_net_retry(sock, flags)) {
+ sr->len -= ret;
+ sr->buf += ret;
+ sr->done_io += ret;
+ req->flags |= REQ_F_PARTIAL_IO;
+ return -EAGAIN;
+ }
req_set_fail(req);
} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
req_set_fail(req);
}
+ if (ret >= 0)
+ ret += sr->done_io;
+ else if (sr->done_io)
+ ret = sr->done_io;
__io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
return 0;
}
@@ -5569,9 +5638,6 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
struct file *file;
int ret, fd;
- if (req->file->f_flags & O_NONBLOCK)
- req->flags |= REQ_F_NOWAIT;
-
if (!fixed) {
fd = __get_unused_fd_flags(accept->flags, accept->nofile);
if (unlikely(fd < 0))
@@ -5801,7 +5867,7 @@ struct io_poll_table {
};
#define IO_POLL_CANCEL_FLAG BIT(31)
-#define IO_POLL_REF_MASK ((1u << 20)-1)
+#define IO_POLL_REF_MASK GENMASK(30, 0)
/*
* If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
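The widened mask leaves exactly one bit for cancellation. The layout this assumes, written out (illustrative, not patch text):

/*
 *   bits 0..30: active poll references  (IO_POLL_REF_MASK = GENMASK(30, 0))
 *   bit     31: cancellation flag       (IO_POLL_CANCEL_FLAG = BIT(31))
 *
 * so BUILD_BUG_ON(IO_POLL_CANCEL_FLAG & IO_POLL_REF_MASK) would hold.
 */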
@@ -6035,10 +6101,13 @@ static void io_poll_cancel_req(struct io_kiocb *req)
io_poll_execute(req, 0, 0);
}
+#define wqe_to_req(wait) ((void *)((unsigned long) (wait)->private & ~1))
+#define wqe_is_double(wait) ((unsigned long) (wait)->private & 1)
+
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
void *key)
{
- struct io_kiocb *req = wait->private;
+ struct io_kiocb *req = wqe_to_req(wait);
struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
wait);
__poll_t mask = key_to_poll(key);
@@ -6076,7 +6145,10 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (mask && poll->events & EPOLLONESHOT) {
list_del_init(&poll->wait.entry);
poll->head = NULL;
- req->flags &= ~REQ_F_SINGLE_POLL;
+ if (wqe_is_double(wait))
+ req->flags &= ~REQ_F_DOUBLE_POLL;
+ else
+ req->flags &= ~REQ_F_SINGLE_POLL;
}
__io_poll_execute(req, mask, poll->events);
}
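The wqe_to_req()/wqe_is_double() macros above rely on pointer tagging: io_kiocb allocations are at least 2-byte aligned, so bit 0 of wait->private is free to carry the "double entry" marker. A standalone sketch of the same trick (hypothetical names):

#include <stdint.h>

static inline void *tag_double(void *req)  /* mark as double-poll entry */
{
	return (void *)((uintptr_t)req | 1);
}

static inline void *untag(void *priv)      /* recover the request pointer */
{
	return (void *)((uintptr_t)priv & ~(uintptr_t)1);
}

static inline int is_double(void *priv)    /* was the low bit set? */
{
	return (int)((uintptr_t)priv & 1);
}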
@@ -6088,6 +6160,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
struct io_poll_iocb **poll_ptr)
{
struct io_kiocb *req = pt->req;
+ unsigned long wqe_private = (unsigned long) req;
/*
* The file being polled uses multiple waitqueues for poll handling
@@ -6113,6 +6186,8 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
pt->error = -ENOMEM;
return;
}
+ /* mark as double wq entry */
+ wqe_private |= 1;
req->flags |= REQ_F_DOUBLE_POLL;
io_init_poll_iocb(poll, first->events, first->wait.func);
*poll_ptr = poll;
@@ -6123,7 +6198,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
req->flags |= REQ_F_SINGLE_POLL;
pt->nr_entries++;
poll->head = head;
- poll->wait.private = req;
+ poll->wait.private = (void *) wqe_private;
if (poll->events & EPOLLEXCLUSIVE)
add_wait_queue_exclusive(head, &poll->wait);
@@ -6150,7 +6225,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
INIT_HLIST_NODE(&req->hash_node);
io_init_poll_iocb(poll, mask, io_poll_wake);
poll->file = req->file;
- poll->wait.private = req;
ipt->pt._key = mask;
ipt->req = req;
@@ -6238,7 +6312,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
} else {
mask |= POLLOUT | POLLWRNORM;
}
-
+ if (def->poll_exclusive)
+ mask |= EPOLLEXCLUSIVE;
if (!(issue_flags & IO_URING_F_UNLOCKED) &&
!list_empty(&ctx->apoll_cache)) {
apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
@@ -6254,6 +6329,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
req->flags |= REQ_F_POLLED;
ipt.pt._qproc = io_async_queue_proc;
+ io_kbuf_recycle(req, issue_flags);
+
ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
if (ret || ipt.error)
return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
@@ -6281,6 +6358,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
list = &ctx->cancel_hash[i];
hlist_for_each_entry_safe(req, tmp, list, hash_node) {
if (io_match_task_safe(req, tsk, cancel_all)) {
+ hlist_del_init(&req->hash_node);
io_poll_cancel_req(req);
found = true;
}
@@ -7075,8 +7153,11 @@ fail:
static void io_clean_op(struct io_kiocb *req)
{
- if (req->flags & REQ_F_BUFFER_SELECTED)
+ if (req->flags & REQ_F_BUFFER_SELECTED) {
+ spin_lock(&req->ctx->completion_lock);
io_put_kbuf_comp(req);
+ spin_unlock(&req->ctx->completion_lock);
+ }
if (req->flags & REQ_F_NEED_CLEANUP) {
switch (req->opcode) {
@@ -7505,11 +7586,9 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
* Queued up for async execution, worker will release
* submit reference when the iocb is actually submitted.
*/
- io_kbuf_recycle(req);
io_queue_async_work(req, NULL);
break;
case IO_APOLL_OK:
- io_kbuf_recycle(req);
break;
}
@@ -8053,6 +8132,13 @@ static int io_sq_thread(void *data)
needs_sched = false;
break;
}
+
+ /*
+ * Ensure the store of the wakeup flag is not
+ * reordered with the load of the SQ tail
+ */
+ smp_mb();
+
if (io_sqring_entries(ctx)) {
needs_sched = false;
break;
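The barrier pairs with one on the submission side; the intended ordering, sketched as a comment (illustrative, the submitter's half lives elsewhere):

/*
 *   SQ thread                     submitter
 *   ---------                     ---------
 *   WRITE wakeup flag             WRITE sq tail
 *   smp_mb()                      smp_mb()
 *   READ  sq tail                 READ  wakeup flag
 *
 * At least one side must observe the other's store, so either the
 * thread sees the new tail or the submitter sees the wakeup flag;
 * a submission can never be lost while the thread sleeps.
 */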
@@ -8782,6 +8868,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
fput(fpl->fp[i]);
} else {
kfree_skb(skb);
+ free_uid(fpl->user);
kfree(fpl);
}
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 090bf47606ab..80ac36aea913 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -173,7 +173,7 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (*len == 0)
return -EINVAL;
- if (start > maxbytes)
+ if (start >= maxbytes)
return -EFBIG;
/*
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 49dccd9050f1..8ce8720093b9 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -435,18 +435,17 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
struct iomap_page *iop = to_iomap_page(folio);
struct inode *inode = folio->mapping->host;
- size_t len;
unsigned first, last, i;
if (!iop)
return false;
- /* Limit range to this folio */
- len = min(folio_size(folio) - from, count);
+ /* Caller's range may extend past the end of this folio */
+ count = min(folio_size(folio) - from, count);
- /* First and last blocks in range within page */
+ /* First and last blocks in range within folio */
first = from >> inode->i_blkbits;
- last = (from + len - 1) >> inode->i_blkbits;
+ last = (from + count - 1) >> inode->i_blkbits;
for (i = first; i <= last; i++)
if (!test_bit(i, iop->uptodate))
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 74067a73ff78..88423069407c 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -120,13 +120,8 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
if (next == ERR_PTR(-ENODEV))
kernfs_seq_stop_active(sf, next);
return next;
- } else {
- /*
- * The same behavior and code as single_open(). Returns
- * !NULL if pos is at the beginning; otherwise, NULL.
- */
- return NULL + !*ppos;
}
+ return single_start(sf, ppos);
}
static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index 077b8761d099..23871b18a429 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -656,8 +656,8 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
rsp->Reserved = 0;
rsp->Reserved2 = 0;
- rsp->PersistentFid = cpu_to_le64(fp->persistent_id);
- rsp->VolatileFid = cpu_to_le64(fp->volatile_id);
+ rsp->PersistentFid = fp->persistent_id;
+ rsp->VolatileFid = fp->volatile_id;
inc_rfc1001_len(work->response_buf, 24);
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index 2e12f6d8483b..4cd03d661df0 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -585,7 +585,7 @@ static int __init ksmbd_server_init(void)
if (ret)
goto err_crypto_destroy;
- pr_warn_once("The ksmbd server is experimental, use at your own risk.\n");
+ pr_warn_once("The ksmbd server is experimental\n");
return 0;
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 67e8e28e3fc3..3bf6c56c654c 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -377,12 +377,8 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
* command in the compound request
*/
if (req->Command == SMB2_CREATE && rsp->Status == STATUS_SUCCESS) {
- work->compound_fid =
- le64_to_cpu(((struct smb2_create_rsp *)rsp)->
- VolatileFileId);
- work->compound_pfid =
- le64_to_cpu(((struct smb2_create_rsp *)rsp)->
- PersistentFileId);
+ work->compound_fid = ((struct smb2_create_rsp *)rsp)->VolatileFileId;
+ work->compound_pfid = ((struct smb2_create_rsp *)rsp)->PersistentFileId;
work->compound_sid = le64_to_cpu(rsp->SessionId);
}
@@ -2129,7 +2125,7 @@ static noinline int create_smb2_pipe(struct ksmbd_work *work)
rsp->EndofFile = cpu_to_le64(0);
rsp->FileAttributes = FILE_ATTRIBUTE_NORMAL_LE;
rsp->Reserved2 = 0;
- rsp->VolatileFileId = cpu_to_le64(id);
+ rsp->VolatileFileId = id;
rsp->PersistentFileId = 0;
rsp->CreateContextsOffset = 0;
rsp->CreateContextsLength = 0;
@@ -3157,8 +3153,8 @@ int smb2_open(struct ksmbd_work *work)
rsp->Reserved2 = 0;
- rsp->PersistentFileId = cpu_to_le64(fp->persistent_id);
- rsp->VolatileFileId = cpu_to_le64(fp->volatile_id);
+ rsp->PersistentFileId = fp->persistent_id;
+ rsp->VolatileFileId = fp->volatile_id;
rsp->CreateContextsOffset = 0;
rsp->CreateContextsLength = 0;
@@ -3865,9 +3861,7 @@ int smb2_query_dir(struct ksmbd_work *work)
goto err_out2;
}
- dir_fp = ksmbd_lookup_fd_slow(work,
- le64_to_cpu(req->VolatileFileId),
- le64_to_cpu(req->PersistentFileId));
+ dir_fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
if (!dir_fp) {
rc = -EBADF;
goto err_out2;
@@ -4088,12 +4082,12 @@ static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
* Windows can sometime send query file info request on
* pipe without opening it, checking error condition here
*/
- id = le64_to_cpu(req->VolatileFileId);
+ id = req->VolatileFileId;
if (!ksmbd_session_rpc_method(sess, id))
return -ENOENT;
ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n",
- req->FileInfoClass, le64_to_cpu(req->VolatileFileId));
+ req->FileInfoClass, req->VolatileFileId);
switch (req->FileInfoClass) {
case FILE_STANDARD_INFORMATION:
@@ -4738,7 +4732,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
}
if (work->next_smb2_rcv_hdr_off) {
- if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+ if (!has_file_id(req->VolatileFileId)) {
ksmbd_debug(SMB, "Compound request set FID = %llu\n",
work->compound_fid);
id = work->compound_fid;
@@ -4747,8 +4741,8 @@ static int smb2_get_info_file(struct ksmbd_work *work,
}
if (!has_file_id(id)) {
- id = le64_to_cpu(req->VolatileFileId);
- pid = le64_to_cpu(req->PersistentFileId);
+ id = req->VolatileFileId;
+ pid = req->PersistentFileId;
}
fp = ksmbd_lookup_fd_slow(work, id, pid);
@@ -5113,7 +5107,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
}
if (work->next_smb2_rcv_hdr_off) {
- if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+ if (!has_file_id(req->VolatileFileId)) {
ksmbd_debug(SMB, "Compound request set FID = %llu\n",
work->compound_fid);
id = work->compound_fid;
@@ -5122,8 +5116,8 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
}
if (!has_file_id(id)) {
- id = le64_to_cpu(req->VolatileFileId);
- pid = le64_to_cpu(req->PersistentFileId);
+ id = req->VolatileFileId;
+ pid = req->PersistentFileId;
}
fp = ksmbd_lookup_fd_slow(work, id, pid);
@@ -5221,7 +5215,7 @@ static noinline int smb2_close_pipe(struct ksmbd_work *work)
struct smb2_close_req *req = smb2_get_msg(work->request_buf);
struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf);
- id = le64_to_cpu(req->VolatileFileId);
+ id = req->VolatileFileId;
ksmbd_session_rpc_close(work->sess, id);
rsp->StructureSize = cpu_to_le16(60);
@@ -5280,7 +5274,7 @@ int smb2_close(struct ksmbd_work *work)
}
if (work->next_smb2_rcv_hdr_off &&
- !has_file_id(le64_to_cpu(req->VolatileFileId))) {
+ !has_file_id(req->VolatileFileId)) {
if (!has_file_id(work->compound_fid)) {
/* file already closed, return FILE_CLOSED */
ksmbd_debug(SMB, "file already closed\n");
@@ -5299,7 +5293,7 @@ int smb2_close(struct ksmbd_work *work)
work->compound_pfid = KSMBD_NO_FID;
}
} else {
- volatile_id = le64_to_cpu(req->VolatileFileId);
+ volatile_id = req->VolatileFileId;
}
ksmbd_debug(SMB, "volatile_id = %llu\n", volatile_id);
@@ -5988,7 +5982,7 @@ int smb2_set_info(struct ksmbd_work *work)
if (work->next_smb2_rcv_hdr_off) {
req = ksmbd_req_buf_next(work);
rsp = ksmbd_resp_buf_next(work);
- if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+ if (!has_file_id(req->VolatileFileId)) {
ksmbd_debug(SMB, "Compound request set FID = %llu\n",
work->compound_fid);
id = work->compound_fid;
@@ -6000,8 +5994,8 @@ int smb2_set_info(struct ksmbd_work *work)
}
if (!has_file_id(id)) {
- id = le64_to_cpu(req->VolatileFileId);
- pid = le64_to_cpu(req->PersistentFileId);
+ id = req->VolatileFileId;
+ pid = req->PersistentFileId;
}
fp = ksmbd_lookup_fd_slow(work, id, pid);
@@ -6079,7 +6073,7 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
struct smb2_read_req *req = smb2_get_msg(work->request_buf);
struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf);
- id = le64_to_cpu(req->VolatileFileId);
+ id = req->VolatileFileId;
inc_rfc1001_len(work->response_buf, 16);
rpc_resp = ksmbd_rpc_read(work->sess, id);
@@ -6215,8 +6209,7 @@ int smb2_read(struct ksmbd_work *work)
goto out;
}
- fp = ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId),
- le64_to_cpu(req->PersistentFileId));
+ fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
if (!fp) {
err = -ENOENT;
goto out;
@@ -6335,7 +6328,7 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
size_t length;
length = le32_to_cpu(req->Length);
- id = le64_to_cpu(req->VolatileFileId);
+ id = req->VolatileFileId;
if (le16_to_cpu(req->DataOffset) ==
offsetof(struct smb2_write_req, Buffer)) {
@@ -6471,8 +6464,7 @@ int smb2_write(struct ksmbd_work *work)
goto out;
}
- fp = ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId),
- le64_to_cpu(req->PersistentFileId));
+ fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
if (!fp) {
err = -ENOENT;
goto out;
@@ -6584,12 +6576,9 @@ int smb2_flush(struct ksmbd_work *work)
WORK_BUFFERS(work, req, rsp);
- ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n",
- le64_to_cpu(req->VolatileFileId));
+ ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n", req->VolatileFileId);
- err = ksmbd_vfs_fsync(work,
- le64_to_cpu(req->VolatileFileId),
- le64_to_cpu(req->PersistentFileId));
+ err = ksmbd_vfs_fsync(work, req->VolatileFileId, req->PersistentFileId);
if (err)
goto out;
@@ -6618,8 +6607,7 @@ int smb2_cancel(struct ksmbd_work *work)
struct ksmbd_conn *conn = work->conn;
struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
struct smb2_hdr *chdr;
- struct ksmbd_work *cancel_work = NULL;
- int canceled = 0;
+ struct ksmbd_work *cancel_work = NULL, *iter;
struct list_head *command_list;
ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
@@ -6629,11 +6617,11 @@ int smb2_cancel(struct ksmbd_work *work)
command_list = &conn->async_requests;
spin_lock(&conn->request_lock);
- list_for_each_entry(cancel_work, command_list,
+ list_for_each_entry(iter, command_list,
async_request_entry) {
- chdr = smb2_get_msg(cancel_work->request_buf);
+ chdr = smb2_get_msg(iter->request_buf);
- if (cancel_work->async_id !=
+ if (iter->async_id !=
le64_to_cpu(hdr->Id.AsyncId))
continue;
@@ -6641,7 +6629,7 @@ int smb2_cancel(struct ksmbd_work *work)
"smb2 with AsyncId %llu cancelled command = 0x%x\n",
le64_to_cpu(hdr->Id.AsyncId),
le16_to_cpu(chdr->Command));
- canceled = 1;
+ cancel_work = iter;
break;
}
spin_unlock(&conn->request_lock);
@@ -6649,24 +6637,24 @@ int smb2_cancel(struct ksmbd_work *work)
command_list = &conn->requests;
spin_lock(&conn->request_lock);
- list_for_each_entry(cancel_work, command_list, request_entry) {
- chdr = smb2_get_msg(cancel_work->request_buf);
+ list_for_each_entry(iter, command_list, request_entry) {
+ chdr = smb2_get_msg(iter->request_buf);
if (chdr->MessageId != hdr->MessageId ||
- cancel_work == work)
+ iter == work)
continue;
ksmbd_debug(SMB,
"smb2 with mid %llu cancelled command = 0x%x\n",
le64_to_cpu(hdr->MessageId),
le16_to_cpu(chdr->Command));
- canceled = 1;
+ cancel_work = iter;
break;
}
spin_unlock(&conn->request_lock);
}
- if (canceled) {
+ if (cancel_work) {
cancel_work->state = KSMBD_WORK_CANCELLED;
if (cancel_work->cancel_fn)
cancel_work->cancel_fn(cancel_work->cancel_argv);
@@ -6804,12 +6792,9 @@ int smb2_lock(struct ksmbd_work *work)
int prior_lock = 0;
ksmbd_debug(SMB, "Received lock request\n");
- fp = ksmbd_lookup_fd_slow(work,
- le64_to_cpu(req->VolatileFileId),
- le64_to_cpu(req->PersistentFileId));
+ fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
if (!fp) {
- ksmbd_debug(SMB, "Invalid file id for lock : %llu\n",
- le64_to_cpu(req->VolatileFileId));
+ ksmbd_debug(SMB, "Invalid file id for lock : %llu\n", req->VolatileFileId);
err = -ENOENT;
goto out2;
}
@@ -7164,8 +7149,8 @@ static int fsctl_copychunk(struct ksmbd_work *work,
ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0];
- rsp->VolatileFileId = cpu_to_le64(volatile_id);
- rsp->PersistentFileId = cpu_to_le64(persistent_id);
+ rsp->VolatileFileId = volatile_id;
+ rsp->PersistentFileId = persistent_id;
ci_rsp->ChunksWritten =
cpu_to_le32(ksmbd_server_side_copy_max_chunk_count());
ci_rsp->ChunkBytesWritten =
@@ -7379,8 +7364,8 @@ ipv6_retry:
if (nii_rsp)
nii_rsp->Next = 0;
- rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
- rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
+ rsp->PersistentFileId = SMB2_NO_FID;
+ rsp->VolatileFileId = SMB2_NO_FID;
return nbytes;
}
@@ -7547,9 +7532,7 @@ static int fsctl_request_resume_key(struct ksmbd_work *work,
{
struct ksmbd_file *fp;
- fp = ksmbd_lookup_fd_slow(work,
- le64_to_cpu(req->VolatileFileId),
- le64_to_cpu(req->PersistentFileId));
+ fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
if (!fp)
return -ENOENT;
@@ -7579,7 +7562,7 @@ int smb2_ioctl(struct ksmbd_work *work)
if (work->next_smb2_rcv_hdr_off) {
req = ksmbd_req_buf_next(work);
rsp = ksmbd_resp_buf_next(work);
- if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+ if (!has_file_id(req->VolatileFileId)) {
ksmbd_debug(SMB, "Compound request set FID = %llu\n",
work->compound_fid);
id = work->compound_fid;
@@ -7590,14 +7573,14 @@ int smb2_ioctl(struct ksmbd_work *work)
}
if (!has_file_id(id))
- id = le64_to_cpu(req->VolatileFileId);
+ id = req->VolatileFileId;
if (req->Flags != cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL)) {
rsp->hdr.Status = STATUS_NOT_SUPPORTED;
goto out;
}
- cnt_code = le32_to_cpu(req->CntCode);
+ cnt_code = le32_to_cpu(req->CtlCode);
ret = smb2_calc_max_out_buf_len(work, 48,
le32_to_cpu(req->MaxOutputResponse));
if (ret < 0) {
@@ -7656,8 +7639,8 @@ int smb2_ioctl(struct ksmbd_work *work)
goto out;
nbytes = sizeof(struct validate_negotiate_info_rsp);
- rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
- rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
+ rsp->PersistentFileId = SMB2_NO_FID;
+ rsp->VolatileFileId = SMB2_NO_FID;
break;
case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len);
@@ -7703,10 +7686,10 @@ int smb2_ioctl(struct ksmbd_work *work)
rsp->PersistentFileId = req->PersistentFileId;
fsctl_copychunk(work,
(struct copychunk_ioctl_req *)&req->Buffer[0],
- le32_to_cpu(req->CntCode),
+ le32_to_cpu(req->CtlCode),
le32_to_cpu(req->InputCount),
- le64_to_cpu(req->VolatileFileId),
- le64_to_cpu(req->PersistentFileId),
+ req->VolatileFileId,
+ req->PersistentFileId,
rsp);
break;
case FSCTL_SET_SPARSE:
@@ -7857,7 +7840,7 @@ dup_ext_out:
goto out;
}
- rsp->CntCode = cpu_to_le32(cnt_code);
+ rsp->CtlCode = cpu_to_le32(cnt_code);
rsp->InputCount = cpu_to_le32(0);
rsp->InputOffset = cpu_to_le32(112);
rsp->OutputOffset = cpu_to_le32(112);
@@ -7903,8 +7886,8 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
char req_oplevel = 0, rsp_oplevel = 0;
unsigned int oplock_change_type;
- volatile_id = le64_to_cpu(req->VolatileFid);
- persistent_id = le64_to_cpu(req->PersistentFid);
+ volatile_id = req->VolatileFid;
+ persistent_id = req->PersistentFid;
req_oplevel = req->OplockLevel;
ksmbd_debug(OPLOCK, "v_id %llu, p_id %llu request oplock level %d\n",
volatile_id, persistent_id, req_oplevel);
@@ -7999,8 +7982,8 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
rsp->OplockLevel = rsp_oplevel;
rsp->Reserved = 0;
rsp->Reserved2 = 0;
- rsp->VolatileFid = cpu_to_le64(volatile_id);
- rsp->PersistentFid = cpu_to_le64(persistent_id);
+ rsp->VolatileFid = volatile_id;
+ rsp->PersistentFid = persistent_id;
inc_rfc1001_len(work->response_buf, 24);
return;
@@ -8500,7 +8483,7 @@ static void fill_transform_hdr(void *tr_buf, char *old_buf, __le16 cipher_type)
struct smb2_hdr *hdr = smb2_get_msg(old_buf);
unsigned int orig_len = get_rfc1002_len(old_buf);
- memset(tr_buf, 0, sizeof(struct smb2_transform_hdr) + 4);
+ /* tr_buf must be cleared by the caller */
tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
tr_hdr->Flags = cpu_to_le16(TRANSFORM_FLAG_ENCRYPTED);
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
index d49468426576..af455278d005 100644
--- a/fs/ksmbd/smb2pdu.h
+++ b/fs/ksmbd/smb2pdu.h
@@ -16,42 +16,13 @@
#define FILE_CREATED 0x00000002
#define FILE_OVERWRITTEN 0x00000003
-/*
- * Size of the session key (crypto key encrypted with the password)
- */
-#define SMB2_NTLMV2_SESSKEY_SIZE 16
-#define SMB2_SIGNATURE_SIZE 16
-#define SMB2_HMACSHA256_SIZE 32
-#define SMB2_CMACAES_SIZE 16
-#define SMB3_GCM128_CRYPTKEY_SIZE 16
-#define SMB3_GCM256_CRYPTKEY_SIZE 32
-
-/*
- * Size of the smb3 encryption/decryption keys
- */
-#define SMB3_ENC_DEC_KEY_SIZE 32
-
-/*
- * Size of the smb3 signing key
- */
-#define SMB3_SIGN_KEY_SIZE 16
-
-#define CIFS_CLIENT_CHALLENGE_SIZE 8
-#define SMB_SERVER_CHALLENGE_SIZE 8
-
/* SMB2 Max Credits */
#define SMB2_MAX_CREDITS 8192
-/* Maximum buffer size value we can send with 1 credit */
-#define SMB2_MAX_BUFFER_SIZE 65536
-
-#define NUMBER_OF_SMB2_COMMANDS 0x0013
-
/* BB FIXME - analyze following length BB */
#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
#define SMB21_DEFAULT_IOSIZE (1024 * 1024)
-#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
#define SMB3_DEFAULT_TRANS_SIZE (1024 * 1024)
#define SMB3_MIN_IOSIZE (64 * 1024)
#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
@@ -65,18 +36,6 @@
*
*/
-#define SMB2_ERROR_STRUCTURE_SIZE2 9
-#define SMB2_ERROR_STRUCTURE_SIZE2_LE cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2)
-
-struct smb2_err_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize;
- __u8 ErrorContextCount;
- __u8 Reserved;
- __le32 ByteCount; /* even if zero, at least one byte follows */
- __u8 ErrorData[1]; /* variable length */
-} __packed;
-
struct preauth_integrity_info {
/* PreAuth integrity Hash ID */
__le16 Preauth_HashId;
@@ -116,8 +75,8 @@ struct create_durable_reconn_req {
union {
__u8 Reserved[16];
struct {
- __le64 PersistentFileId;
- __le64 VolatileFileId;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
} Fid;
} Data;
} __packed;
@@ -126,8 +85,8 @@ struct create_durable_reconn_v2_req {
struct create_context ccontext;
__u8 Name[8];
struct {
- __le64 PersistentFileId;
- __le64 VolatileFileId;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
} Fid;
__u8 CreateGuid[16];
__le32 Flags;
@@ -161,13 +120,6 @@ struct create_alloc_size_req {
__le64 AllocationSize;
} __packed;
-struct create_posix {
- struct create_context ccontext;
- __u8 Name[16];
- __le32 Mode;
- __u32 Reserved;
-} __packed;
-
struct create_durable_rsp {
struct create_context ccontext;
__u8 Name[8];
@@ -209,45 +161,6 @@ struct create_posix_rsp {
u8 SidBuffer[40];
} __packed;
-#define SMB2_LEASE_NONE_LE cpu_to_le32(0x00)
-#define SMB2_LEASE_READ_CACHING_LE cpu_to_le32(0x01)
-#define SMB2_LEASE_HANDLE_CACHING_LE cpu_to_le32(0x02)
-#define SMB2_LEASE_WRITE_CACHING_LE cpu_to_le32(0x04)
-
-#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE cpu_to_le32(0x02)
-
-#define SMB2_LEASE_KEY_SIZE 16
-
-struct lease_context {
- __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
- __le32 LeaseState;
- __le32 LeaseFlags;
- __le64 LeaseDuration;
-} __packed;
-
-struct lease_context_v2 {
- __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
- __le32 LeaseState;
- __le32 LeaseFlags;
- __le64 LeaseDuration;
- __u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE];
- __le16 Epoch;
- __le16 Reserved;
-} __packed;
-
-struct create_lease {
- struct create_context ccontext;
- __u8 Name[8];
- struct lease_context lcontext;
-} __packed;
-
-struct create_lease_v2 {
- struct create_context ccontext;
- __u8 Name[8];
- struct lease_context_v2 lcontext;
- __u8 Pad[4];
-} __packed;
-
struct smb2_buffer_desc_v1 {
__le64 offset;
__le32 token;
@@ -256,63 +169,6 @@ struct smb2_buffer_desc_v1 {
#define SMB2_0_IOCTL_IS_FSCTL 0x00000001
-struct duplicate_extents_to_file {
- __u64 PersistentFileHandle; /* source file handle, opaque endianness */
- __u64 VolatileFileHandle;
- __le64 SourceFileOffset;
- __le64 TargetFileOffset;
- __le64 ByteCount; /* Bytes to be copied */
-} __packed;
-
-struct smb2_ioctl_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 57 */
- __le16 Reserved; /* MBZ */
- __le32 CntCode;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 InputOffset; /* offset from start of SMB2 header to input data */
- __le32 InputCount;
- __le32 MaxInputResponse;
- __le32 OutputOffset;
- __le32 OutputCount;
- __le32 MaxOutputResponse;
- __le32 Flags;
- __le32 Reserved2;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_ioctl_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 49 */
- __le16 Reserved; /* MBZ */
- __le32 CntCode;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 InputOffset; /* Reserved MBZ */
- __le32 InputCount;
- __le32 OutputOffset;
- __le32 OutputCount;
- __le32 Flags;
- __le32 Reserved2;
- __u8 Buffer[1];
-} __packed;
-
-struct validate_negotiate_info_req {
- __le32 Capabilities;
- __u8 Guid[SMB2_CLIENT_GUID_SIZE];
- __le16 SecurityMode;
- __le16 DialectCount;
- __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
-} __packed;
-
-struct validate_negotiate_info_rsp {
- __le32 Capabilities;
- __u8 Guid[SMB2_CLIENT_GUID_SIZE];
- __le16 SecurityMode;
- __le16 Dialect; /* Dialect in use for the connection */
-} __packed;
-
struct smb_sockaddr_in {
__be16 Port;
__be32 IPv4address;
@@ -357,7 +213,7 @@ struct file_object_buf_type1_ioctl_rsp {
} __packed;
struct resume_key_ioctl_rsp {
- __le64 ResumeKey[3];
+ __u64 ResumeKey[3];
__le32 ContextLength;
__u8 Context[4]; /* ignored, Windows sets to 4 bytes of zero */
} __packed;
@@ -386,167 +242,6 @@ struct file_sparse {
__u8 SetSparse;
} __packed;
-struct file_zero_data_information {
- __le64 FileOffset;
- __le64 BeyondFinalZero;
-} __packed;
-
-struct file_allocated_range_buffer {
- __le64 file_offset;
- __le64 length;
-} __packed;
-
-struct reparse_data_buffer {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __u8 DataBuffer[]; /* Variable Length */
-} __packed;
-
-/* SMB2 Notify Action Flags */
-#define FILE_ACTION_ADDED 0x00000001
-#define FILE_ACTION_REMOVED 0x00000002
-#define FILE_ACTION_MODIFIED 0x00000003
-#define FILE_ACTION_RENAMED_OLD_NAME 0x00000004
-#define FILE_ACTION_RENAMED_NEW_NAME 0x00000005
-#define FILE_ACTION_ADDED_STREAM 0x00000006
-#define FILE_ACTION_REMOVED_STREAM 0x00000007
-#define FILE_ACTION_MODIFIED_STREAM 0x00000008
-#define FILE_ACTION_REMOVED_BY_DELETE 0x00000009
-
-#define SMB2_LOCKFLAG_SHARED 0x0001
-#define SMB2_LOCKFLAG_EXCLUSIVE 0x0002
-#define SMB2_LOCKFLAG_UNLOCK 0x0004
-#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
-#define SMB2_LOCKFLAG_MASK 0x0007
-
-struct smb2_lock_element {
- __le64 Offset;
- __le64 Length;
- __le32 Flags;
- __le32 Reserved;
-} __packed;
-
-struct smb2_lock_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 48 */
- __le16 LockCount;
- __le32 Reserved;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- /* Followed by at least one */
- struct smb2_lock_element locks[1];
-} __packed;
-
-struct smb2_lock_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_echo_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __u16 Reserved;
-} __packed;
-
-struct smb2_echo_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __u16 Reserved;
-} __packed;
-
-/* search (query_directory) Flags field */
-#define SMB2_RESTART_SCANS 0x01
-#define SMB2_RETURN_SINGLE_ENTRY 0x02
-#define SMB2_INDEX_SPECIFIED 0x04
-#define SMB2_REOPEN 0x10
-
-struct smb2_query_directory_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 33 */
- __u8 FileInformationClass;
- __u8 Flags;
- __le32 FileIndex;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le16 FileNameOffset;
- __le16 FileNameLength;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_query_directory_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-/* Possible InfoType values */
-#define SMB2_O_INFO_FILE 0x01
-#define SMB2_O_INFO_FILESYSTEM 0x02
-#define SMB2_O_INFO_SECURITY 0x03
-#define SMB2_O_INFO_QUOTA 0x04
-
-/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */
-#define OWNER_SECINFO 0x00000001
-#define GROUP_SECINFO 0x00000002
-#define DACL_SECINFO 0x00000004
-#define SACL_SECINFO 0x00000008
-#define LABEL_SECINFO 0x00000010
-#define ATTRIBUTE_SECINFO 0x00000020
-#define SCOPE_SECINFO 0x00000040
-#define BACKUP_SECINFO 0x00010000
-#define UNPROTECTED_SACL_SECINFO 0x10000000
-#define UNPROTECTED_DACL_SECINFO 0x20000000
-#define PROTECTED_SACL_SECINFO 0x40000000
-#define PROTECTED_DACL_SECINFO 0x80000000
-
-struct smb2_query_info_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 41 */
- __u8 InfoType;
- __u8 FileInfoClass;
- __le32 OutputBufferLength;
- __le16 InputBufferOffset;
- __u16 Reserved;
- __le32 InputBufferLength;
- __le32 AdditionalInformation;
- __le32 Flags;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_query_info_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_set_info_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 33 */
- __u8 InfoType;
- __u8 FileInfoClass;
- __le32 BufferLength;
- __le16 BufferOffset;
- __u16 Reserved;
- __le32 AdditionalInformation;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_set_info_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 2 */
-} __packed;
-
/* FILE Info response size */
#define FILE_DIRECTORY_INFORMATION_SIZE 1
#define FILE_FULL_DIRECTORY_INFORMATION_SIZE 2
@@ -602,145 +297,11 @@ struct fs_type_info {
long magic_number;
} __packed;
-struct smb2_oplock_break {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 24 */
- __u8 OplockLevel;
- __u8 Reserved;
- __le32 Reserved2;
- __le64 PersistentFid;
- __le64 VolatileFid;
-} __packed;
-
-#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
-
-struct smb2_lease_break {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 44 */
- __le16 Epoch;
- __le32 Flags;
- __u8 LeaseKey[16];
- __le32 CurrentLeaseState;
- __le32 NewLeaseState;
- __le32 BreakReason;
- __le32 AccessMaskHint;
- __le32 ShareMaskHint;
-} __packed;
-
-struct smb2_lease_ack {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 36 */
- __le16 Reserved;
- __le32 Flags;
- __u8 LeaseKey[16];
- __le32 LeaseState;
- __le64 LeaseDuration;
-} __packed;
-
/*
- * PDU infolevel structure definitions
+ * PDU query infolevel structure definitions
* BB consider moving to a different header
*/
-/* File System Information Classes */
-#define FS_VOLUME_INFORMATION 1 /* Query */
-#define FS_LABEL_INFORMATION 2 /* Set */
-#define FS_SIZE_INFORMATION 3 /* Query */
-#define FS_DEVICE_INFORMATION 4 /* Query */
-#define FS_ATTRIBUTE_INFORMATION 5 /* Query */
-#define FS_CONTROL_INFORMATION 6 /* Query, Set */
-#define FS_FULL_SIZE_INFORMATION 7 /* Query */
-#define FS_OBJECT_ID_INFORMATION 8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION 9 /* Query */
-#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */
-#define FS_POSIX_INFORMATION 100 /* SMB3.1.1 POSIX. Query */
-
-struct smb2_fs_full_size_info {
- __le64 TotalAllocationUnits;
- __le64 CallerAvailableAllocationUnits;
- __le64 ActualAvailableAllocationUnits;
- __le32 SectorsPerAllocationUnit;
- __le32 BytesPerSector;
-} __packed;
-
-#define SSINFO_FLAGS_ALIGNED_DEVICE 0x00000001
-#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
-#define SSINFO_FLAGS_NO_SEEK_PENALTY 0x00000004
-#define SSINFO_FLAGS_TRIM_ENABLED 0x00000008
-
-/* sector size info struct */
-struct smb3_fs_ss_info {
- __le32 LogicalBytesPerSector;
- __le32 PhysicalBytesPerSectorForAtomicity;
- __le32 PhysicalBytesPerSectorForPerf;
- __le32 FSEffPhysicalBytesPerSectorForAtomicity;
- __le32 Flags;
- __le32 ByteOffsetForSectorAlignment;
- __le32 ByteOffsetForPartitionAlignment;
-} __packed;
-
-/* File System Control Information */
-struct smb2_fs_control_info {
- __le64 FreeSpaceStartFiltering;
- __le64 FreeSpaceThreshold;
- __le64 FreeSpaceStopFiltering;
- __le64 DefaultQuotaThreshold;
- __le64 DefaultQuotaLimit;
- __le32 FileSystemControlFlags;
- __le32 Padding;
-} __packed;
-
-/* partial list of QUERY INFO levels */
-#define FILE_DIRECTORY_INFORMATION 1
-#define FILE_FULL_DIRECTORY_INFORMATION 2
-#define FILE_BOTH_DIRECTORY_INFORMATION 3
-#define FILE_BASIC_INFORMATION 4
-#define FILE_STANDARD_INFORMATION 5
-#define FILE_INTERNAL_INFORMATION 6
-#define FILE_EA_INFORMATION 7
-#define FILE_ACCESS_INFORMATION 8
-#define FILE_NAME_INFORMATION 9
-#define FILE_RENAME_INFORMATION 10
-#define FILE_LINK_INFORMATION 11
-#define FILE_NAMES_INFORMATION 12
-#define FILE_DISPOSITION_INFORMATION 13
-#define FILE_POSITION_INFORMATION 14
-#define FILE_FULL_EA_INFORMATION 15
-#define FILE_MODE_INFORMATION 16
-#define FILE_ALIGNMENT_INFORMATION 17
-#define FILE_ALL_INFORMATION 18
-#define FILE_ALLOCATION_INFORMATION 19
-#define FILE_END_OF_FILE_INFORMATION 20
-#define FILE_ALTERNATE_NAME_INFORMATION 21
-#define FILE_STREAM_INFORMATION 22
-#define FILE_PIPE_INFORMATION 23
-#define FILE_PIPE_LOCAL_INFORMATION 24
-#define FILE_PIPE_REMOTE_INFORMATION 25
-#define FILE_MAILSLOT_QUERY_INFORMATION 26
-#define FILE_MAILSLOT_SET_INFORMATION 27
-#define FILE_COMPRESSION_INFORMATION 28
-#define FILE_OBJECT_ID_INFORMATION 29
-/* Number 30 not defined in documents */
-#define FILE_MOVE_CLUSTER_INFORMATION 31
-#define FILE_QUOTA_INFORMATION 32
-#define FILE_REPARSE_POINT_INFORMATION 33
-#define FILE_NETWORK_OPEN_INFORMATION 34
-#define FILE_ATTRIBUTE_TAG_INFORMATION 35
-#define FILE_TRACKING_INFORMATION 36
-#define FILEID_BOTH_DIRECTORY_INFORMATION 37
-#define FILEID_FULL_DIRECTORY_INFORMATION 38
-#define FILE_VALID_DATA_LENGTH_INFORMATION 39
-#define FILE_SHORT_NAME_INFORMATION 40
-#define FILE_SFIO_RESERVE_INFORMATION 44
-#define FILE_SFIO_VOLUME_INFORMATION 45
-#define FILE_HARD_LINK_INFORMATION 46
-#define FILE_NORMALIZED_NAME_INFORMATION 48
-#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
-#define FILE_STANDARD_LINK_INFORMATION 54
-
-#define OP_BREAK_STRUCT_SIZE_20 24
-#define OP_BREAK_STRUCT_SIZE_21 36
-
struct smb2_file_access_info {
__le32 AccessFlags;
} __packed;
@@ -749,56 +310,6 @@ struct smb2_file_alignment_info {
__le32 AlignmentRequirement;
} __packed;
-struct smb2_file_internal_info {
- __le64 IndexNumber;
-} __packed; /* level 6 Query */
-
-struct smb2_file_rename_info { /* encoding of request for level 10 */
- __u8 ReplaceIfExists; /* 1 = replace existing target with new */
- /* 0 = fail if target already exists */
- __u8 Reserved[7];
- __u64 RootDirectory; /* MBZ for network operations (why says spec?) */
- __le32 FileNameLength;
- char FileName[]; /* New name to be assigned */
-} __packed; /* level 10 Set */
-
-struct smb2_file_link_info { /* encoding of request for level 11 */
- __u8 ReplaceIfExists; /* 1 = replace existing link with new */
- /* 0 = fail if link already exists */
- __u8 Reserved[7];
- __u64 RootDirectory; /* MBZ for network operations (why says spec?) */
- __le32 FileNameLength;
- char FileName[]; /* Name to be assigned to new link */
-} __packed; /* level 11 Set */
-
-/*
- * This level 18, although with struct with same name is different from cifs
- * level 0x107. Level 0x107 has an extra u64 between AccessFlags and
- * CurrentByteOffset.
- */
-struct smb2_file_all_info { /* data block encoding of response to level 18 */
- __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le32 Attributes;
- __u32 Pad1; /* End of FILE_BASIC_INFO_INFO equivalent */
- __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
- __le64 EndOfFile; /* size ie offset to first free byte in file */
- __le32 NumberOfLinks; /* hard links */
- __u8 DeletePending;
- __u8 Directory;
- __u16 Pad2; /* End of FILE_STANDARD_INFO equivalent */
- __le64 IndexNumber;
- __le32 EASize;
- __le32 AccessFlags;
- __le64 CurrentByteOffset;
- __le32 Mode;
- __le32 AlignmentRequirement;
- __le32 FileNameLength;
- char FileName[1];
-} __packed; /* level 18 Query */
-
struct smb2_file_basic_info { /* data block encoding of response to level 18 */
__le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
__le64 LastAccessTime;
@@ -821,10 +332,6 @@ struct smb2_file_stream_info {
char StreamName[];
} __packed;
-struct smb2_file_eof_info { /* encoding of request for level 10 */
- __le64 EndOfFile; /* new end of file value */
-} __packed; /* level 20 Set */
-
struct smb2_file_ntwrk_info {
__le64 CreationTime;
__le64 LastAccessTime;
@@ -915,34 +422,6 @@ struct create_sd_buf_req {
struct smb_ntsd ntsd;
} __packed;
-/* Find File infolevels */
-#define SMB_FIND_FILE_POSIX_INFO 0x064
-
-/* Level 100 query info */
-struct smb311_posix_qinfo {
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 DosAttributes;
- __le64 Inode;
- __le32 DeviceId;
- __le32 Zero;
- /* beginning of POSIX Create Context Response */
- __le32 HardLinks;
- __le32 ReparseTag;
- __le32 Mode;
- u8 Sids[];
- /*
- * var sized owner SID
- * var sized group SID
- * le32 filenamelength
- * u8 filename[]
- */
-} __packed;
-
struct smb2_posix_info {
__le32 NextEntryOffset;
__u32 Ignored;
diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
index 82a1429bbe12..8fef9de787d3 100644
--- a/fs/ksmbd/transport_tcp.c
+++ b/fs/ksmbd/transport_tcp.c
@@ -476,7 +476,7 @@ static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
switch (event) {
case NETDEV_UP:
- if (netdev->priv_flags & IFF_BRIDGE_PORT)
+ if (netif_is_bridge_port(netdev))
return NOTIFY_OK;
list_for_each_entry(iface, &iface_list, entry) {
@@ -585,7 +585,7 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
rtnl_lock();
for_each_netdev(&init_net, netdev) {
- if (netdev->priv_flags & IFF_BRIDGE_PORT)
+ if (netif_is_bridge_port(netdev))
continue;
if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL)))
return -ENOMEM;
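netif_is_bridge_port() expresses the same priv_flags test behind a descriptive name; roughly as defined in include/linux/netdevice.h:

    static inline bool netif_is_bridge_port(const struct net_device *dev)
    {
            return dev->priv_flags & IFF_BRIDGE_PORT;
    }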
diff --git a/fs/namespace.c b/fs/namespace.c
index 6e9844b8c6fb..a0a36bfa3aa0 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2112,22 +2112,23 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
unsigned int max = READ_ONCE(sysctl_mount_max);
- unsigned int mounts = 0, old, pending, sum;
+ unsigned int mounts = 0;
struct mount *p;
+ if (ns->mounts >= max)
+ return -ENOSPC;
+ max -= ns->mounts;
+ if (ns->pending_mounts >= max)
+ return -ENOSPC;
+ max -= ns->pending_mounts;
+
for (p = mnt; p; p = next_mnt(p, mnt))
mounts++;
- old = ns->mounts;
- pending = ns->pending_mounts;
- sum = old + pending;
- if ((old > sum) ||
- (pending > sum) ||
- (max < sum) ||
- (mounts > (max - sum)))
+ if (mounts > max)
return -ENOSPC;
- ns->pending_mounts = pending + mounts;
+ ns->pending_mounts += mounts;
return 0;
}
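The rewrite above drops the explicit overflow checks entirely: instead of forming old + pending and testing for wraparound, it subtracts each consumed amount from max in turn, so no intermediate sum can exceed the unsigned range. The pattern in isolation (a sketch, function name hypothetical):

    /* Mirrors count_mounts(): check a + b + c against max by whittling max
     * down step by step, so no intermediate sum can wrap around. */
    static bool fits_budget(unsigned int max, unsigned int a,
                            unsigned int b, unsigned int c)
    {
            if (a >= max)
                    return false;
            max -= a;
            if (b >= max)
                    return false;
            max -= b;
            return c <= max;
    }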
@@ -2921,7 +2922,7 @@ static int do_move_mount_old(struct path *path, const char *old_name)
* add a mount into a namespace's mount tree
*/
static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
- struct path *path, int mnt_flags)
+ const struct path *path, int mnt_flags)
{
struct mount *parent = real_mount(path->mnt);
@@ -3044,7 +3045,7 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
return err;
}
-int finish_automount(struct vfsmount *m, struct path *path)
+int finish_automount(struct vfsmount *m, const struct path *path)
{
struct dentry *dentry = path->dentry;
struct mountpoint *mp;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index b0ca244c50d0..150b7fa8f0a7 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -646,7 +646,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
result = generic_write_checks(iocb, from);
if (result > 0) {
current->backing_dev_info = inode_to_bdi(inode);
- result = generic_perform_write(file, from, iocb->ki_pos);
+ result = generic_perform_write(iocb, from);
current->backing_dev_info = NULL;
}
nfs_end_io_write(inode);
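The hunk above follows the 5.18 API change in which generic_perform_write() takes the kiocb instead of a (file, pos) pair and reads the position from iocb->ki_pos. A caller sketch under that assumption (function name hypothetical):

    /* Sketch: a buffered-write path after the signature change. */
    static ssize_t example_file_write(struct kiocb *iocb, struct iov_iter *from)
    {
            ssize_t written;

            written = generic_write_checks(iocb, from);
            if (written <= 0)
                    return written;
            written = generic_perform_write(iocb, from); /* pos from iocb->ki_pos */
            if (written > 0)
                    iocb->ki_pos += written;             /* caller still advances */
            return written;
    }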
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 66bdaa2cf496..ca611ac09f7c 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -20,6 +20,23 @@
#include "page.h"
#include "btnode.h"
+
+/**
+ * nilfs_init_btnc_inode - initialize B-tree node cache inode
+ * @btnc_inode: inode to be initialized
+ *
+ * nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
+ */
+void nilfs_init_btnc_inode(struct inode *btnc_inode)
+{
+ struct nilfs_inode_info *ii = NILFS_I(btnc_inode);
+
+ btnc_inode->i_mode = S_IFREG;
+ ii->i_flags = 0;
+ memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
+ mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
+}
+
void nilfs_btnode_cache_clear(struct address_space *btnc)
{
invalidate_mapping_pages(btnc, 0, -1);
@@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
- struct inode *inode = NILFS_BTNC_I(btnc);
+ struct inode *inode = btnc->host;
struct buffer_head *bh;
bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
@@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
struct buffer_head **pbh, sector_t *submit_ptr)
{
struct buffer_head *bh;
- struct inode *inode = NILFS_BTNC_I(btnc);
+ struct inode *inode = btnc->host;
struct page *page;
int err;
@@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
{
struct buffer_head *obh, *nbh;
- struct inode *inode = NILFS_BTNC_I(btnc);
+ struct inode *inode = btnc->host;
__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
int err;
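With a dedicated cache-holder inode, the B-tree node mapping's ->host now points at that inode, and the i_assoc_inode field introduced by this patch links back to the file it serves; the container_of()-based NILFS_BTNC_I() (removed from nilfs.h below) becomes unnecessary. A navigation sketch (helper name hypothetical):

    /* Sketch: from a btnode mapping back to the file inode it belongs to. */
    static struct inode *btnc_owner(struct address_space *btnc)
    {
            struct inode *btnc_inode = btnc->host;      /* cache-holder inode */

            return NILFS_I(btnc_inode)->i_assoc_inode;  /* file inode it serves */
    }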
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 11663650add7..bd5544e63a01 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt {
struct buffer_head *newbh;
};
+void nilfs_init_btnc_inode(struct inode *btnc_inode);
void nilfs_btnode_cache_clear(struct address_space *);
struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
__u64 blocknr);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 3594eabe1419..f544c22fff78 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path)
static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
__u64 ptr, struct buffer_head **bhp)
{
- struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+ struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+ struct address_space *btnc = btnc_inode->i_mapping;
struct buffer_head *bh;
bh = nilfs_btnode_create_block(btnc, ptr);
@@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
struct buffer_head **bhp,
const struct nilfs_btree_readahead_info *ra)
{
- struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+ struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+ struct address_space *btnc = btnc_inode->i_mapping;
struct buffer_head *bh, *ra_bh;
sector_t submit_ptr = 0;
int ret;
@@ -1741,6 +1743,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
dat = nilfs_bmap_get_dat(btree);
}
+ ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode);
+ if (ret < 0)
+ return ret;
+
ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
if (ret < 0)
return ret;
@@ -1913,7 +1919,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
path[level].bp_ctxt.bh = path[level].bp_bh;
ret = nilfs_btnode_prepare_change_key(
- &NILFS_BMAP_I(btree)->i_btnode_cache,
+ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
if (ret < 0) {
nilfs_dat_abort_update(dat,
@@ -1939,7 +1945,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
if (buffer_nilfs_node(path[level].bp_bh)) {
nilfs_btnode_commit_change_key(
- &NILFS_BMAP_I(btree)->i_btnode_cache,
+ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
path[level].bp_bh = path[level].bp_ctxt.bh;
}
@@ -1958,7 +1964,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
&path[level].bp_newreq.bpr_req);
if (buffer_nilfs_node(path[level].bp_bh))
nilfs_btnode_abort_change_key(
- &NILFS_BMAP_I(btree)->i_btnode_cache,
+ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
}
@@ -2134,7 +2140,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
struct list_head *listp)
{
- struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
+ struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+ struct address_space *btcache = btnc_inode->i_mapping;
struct list_head lists[NILFS_BTREE_LEVEL_MAX];
struct pagevec pvec;
struct buffer_head *bh, *head;
@@ -2188,12 +2195,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
path[level].bp_ctxt.newkey = blocknr;
path[level].bp_ctxt.bh = *bh;
ret = nilfs_btnode_prepare_change_key(
- &NILFS_BMAP_I(btree)->i_btnode_cache,
+ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
if (ret < 0)
return ret;
nilfs_btnode_commit_change_key(
- &NILFS_BMAP_I(btree)->i_btnode_cache,
+ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
*bh = path[level].bp_ctxt.bh;
}
@@ -2398,6 +2405,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
ret = -EIO;
+ else
+ ret = nilfs_attach_btree_node_cache(
+ &NILFS_BMAP_I(bmap)->vfs_inode);
+
return ret;
}
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index dc51d3b7a7bf..3b55e239705f 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
di = NILFS_DAT_I(dat);
lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
nilfs_palloc_setup_cache(dat, &di->palloc_cache);
- nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+ err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+ if (err)
+ goto failed;
err = nilfs_read_inode_common(dat, raw_inode);
if (err)
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index a8f5315f01e3..04fdd420eae7 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
__u64 vbn, struct buffer_head **out_bh)
{
+ struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
int ret;
- ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
+ ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
vbn ? : pbn, pbn, REQ_OP_READ, 0,
out_bh, &pbn);
if (ret == -EEXIST) /* internal code (cache hit) */
@@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
ii->i_flags = 0;
nilfs_bmap_init_gc(ii->i_bmap);
- return 0;
+ return nilfs_attach_btree_node_cache(inode);
}
/**
@@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
list_del_init(&ii->i_dirty);
truncate_inode_pages(&ii->vfs_inode.i_data, 0);
- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
iput(&ii->vfs_inode);
}
}
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 476a4a649f38..6045cea21f52 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -29,12 +29,16 @@
* @cno: checkpoint number
* @root: pointer on NILFS root object (mounted checkpoint)
* @for_gc: inode for GC flag
+ * @for_btnc: inode for B-tree node cache flag
+ * @for_shadow: inode for shadowed page cache flag
*/
struct nilfs_iget_args {
u64 ino;
__u64 cno;
struct nilfs_root *root;
- int for_gc;
+ bool for_gc;
+ bool for_btnc;
+ bool for_shadow;
};
static int nilfs_iget_test(struct inode *inode, void *opaque);
@@ -312,7 +316,8 @@ static int nilfs_insert_inode_locked(struct inode *inode,
unsigned long ino)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = root, .cno = 0, .for_gc = 0
+ .ino = ino, .root = root, .cno = 0, .for_gc = false,
+ .for_btnc = false, .for_shadow = false
};
return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@@ -525,6 +530,19 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
return 0;
ii = NILFS_I(inode);
+ if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
+ if (!args->for_btnc)
+ return 0;
+ } else if (args->for_btnc) {
+ return 0;
+ }
+ if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
+ if (!args->for_shadow)
+ return 0;
+ } else if (args->for_shadow) {
+ return 0;
+ }
+
if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
return !args->for_gc;
@@ -536,15 +554,17 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
struct nilfs_iget_args *args = opaque;
inode->i_ino = args->ino;
- if (args->for_gc) {
+ NILFS_I(inode)->i_cno = args->cno;
+ NILFS_I(inode)->i_root = args->root;
+ if (args->root && args->ino == NILFS_ROOT_INO)
+ nilfs_get_root(args->root);
+
+ if (args->for_gc)
NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
- NILFS_I(inode)->i_cno = args->cno;
- NILFS_I(inode)->i_root = NULL;
- } else {
- if (args->root && args->ino == NILFS_ROOT_INO)
- nilfs_get_root(args->root);
- NILFS_I(inode)->i_root = args->root;
- }
+ if (args->for_btnc)
+ NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
+ if (args->for_shadow)
+ NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
return 0;
}
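Since the cache-holder and shadow inodes reuse the ino of the file they serve, up to three on-memory inodes can now share one hash key; the new for_btnc and for_shadow flags let nilfs_iget_test() reject the variants a lookup did not ask for. A lookup sketch (function name hypothetical):

    /* Sketch: find only the B-tree node cache variant for @ino, if cached. */
    static struct inode *lookup_btnc_variant(struct super_block *sb,
                                             struct nilfs_root *root,
                                             unsigned long ino)
    {
            struct nilfs_iget_args args = {
                    .ino = ino, .root = root, .cno = 0,
                    .for_gc = false, .for_btnc = true, .for_shadow = false,
            };

            /* nilfs_iget_test() matches only inodes whose NILFS_I_BTNC state
             * agrees with args.for_btnc, so a regular inode is never returned. */
            return ilookup5(sb, ino, nilfs_iget_test, &args);
    }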
@@ -552,7 +572,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
unsigned long ino)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = root, .cno = 0, .for_gc = 0
+ .ino = ino, .root = root, .cno = 0, .for_gc = false,
+ .for_btnc = false, .for_shadow = false
};
return ilookup5(sb, ino, nilfs_iget_test, &args);
@@ -562,7 +583,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
unsigned long ino)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = root, .cno = 0, .for_gc = 0
+ .ino = ino, .root = root, .cno = 0, .for_gc = false,
+ .for_btnc = false, .for_shadow = false
};
return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@@ -593,7 +615,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
__u64 cno)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
+ .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
+ .for_btnc = false, .for_shadow = false
};
struct inode *inode;
int err;
@@ -613,6 +636,113 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
return inode;
}
+/**
+ * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
+ * @inode: inode object
+ *
+ * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
+ * or does nothing if the inode already has one. This function allocates
+ * an additional inode that holds the page cache of B-tree nodes, paired
+ * one-to-one with @inode.
+ *
+ * Return Value: On success, 0 is returned. On errors, one of the following
+ * negative error codes is returned.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+int nilfs_attach_btree_node_cache(struct inode *inode)
+{
+ struct nilfs_inode_info *ii = NILFS_I(inode);
+ struct inode *btnc_inode;
+ struct nilfs_iget_args args;
+
+ if (ii->i_assoc_inode)
+ return 0;
+
+ args.ino = inode->i_ino;
+ args.root = ii->i_root;
+ args.cno = ii->i_cno;
+ args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
+ args.for_btnc = true;
+ args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
+
+ btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+ nilfs_iget_set, &args);
+ if (unlikely(!btnc_inode))
+ return -ENOMEM;
+ if (btnc_inode->i_state & I_NEW) {
+ nilfs_init_btnc_inode(btnc_inode);
+ unlock_new_inode(btnc_inode);
+ }
+ NILFS_I(btnc_inode)->i_assoc_inode = inode;
+ NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
+ ii->i_assoc_inode = btnc_inode;
+
+ return 0;
+}
+
+/**
+ * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
+ * @inode: inode object
+ *
+ * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
+ * holder inode bound to @inode, or does nothing if @inode doesn't have it.
+ */
+void nilfs_detach_btree_node_cache(struct inode *inode)
+{
+ struct nilfs_inode_info *ii = NILFS_I(inode);
+ struct inode *btnc_inode = ii->i_assoc_inode;
+
+ if (btnc_inode) {
+ NILFS_I(btnc_inode)->i_assoc_inode = NULL;
+ ii->i_assoc_inode = NULL;
+ iput(btnc_inode);
+ }
+}
+
+/**
+ * nilfs_iget_for_shadow - obtain inode for shadow mapping
+ * @inode: inode object that uses shadow mapping
+ *
+ * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
+ * caches for shadow mapping. The page cache for data pages is set up
+ * in one inode and the one for B-tree node pages is set up in the
+ * other inode, which is attached to the former inode.
+ *
+ * Return Value: On success, a pointer to the inode for data pages is
+ * returned. On errors, one of the following negative error codes is
+ * returned via ERR_PTR().
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+struct inode *nilfs_iget_for_shadow(struct inode *inode)
+{
+ struct nilfs_iget_args args = {
+ .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
+ .for_btnc = false, .for_shadow = true
+ };
+ struct inode *s_inode;
+ int err;
+
+ s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+ nilfs_iget_set, &args);
+ if (unlikely(!s_inode))
+ return ERR_PTR(-ENOMEM);
+ if (!(s_inode->i_state & I_NEW))
+ return inode;
+
+ NILFS_I(s_inode)->i_flags = 0;
+ memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
+ mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+
+ err = nilfs_attach_btree_node_cache(s_inode);
+ if (unlikely(err)) {
+ iget_failed(s_inode);
+ return ERR_PTR(err);
+ }
+ unlock_new_inode(s_inode);
+ return s_inode;
+}
+
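Callers consume the ERR_PTR() convention the same way the mdt.c hunk further down does; a minimal sketch:

    s_inode = nilfs_iget_for_shadow(inode);
    if (IS_ERR(s_inode))
            return PTR_ERR(s_inode);   /* e.g. -ENOMEM, or btnc attach failure */
    shadow->inode = s_inode;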
void nilfs_write_inode_common(struct inode *inode,
struct nilfs_inode *raw_inode, int has_bmap)
{
@@ -760,7 +890,8 @@ static void nilfs_clear_inode(struct inode *inode)
if (test_bit(NILFS_I_BMAP, &ii->i_state))
nilfs_bmap_clear(ii->i_bmap);
- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ if (!test_bit(NILFS_I_BTNC, &ii->i_state))
+ nilfs_detach_btree_node_cache(inode);
if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
nilfs_put_root(ii->i_root);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 78db33decd72..d29a0f2b9c16 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -471,9 +471,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
void nilfs_mdt_clear(struct inode *inode)
{
struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+ struct nilfs_shadow_map *shadow = mdi->mi_shadow;
if (mdi->mi_palloc_cache)
nilfs_palloc_destroy_cache(inode);
+
+ if (shadow) {
+ struct inode *s_inode = shadow->inode;
+
+ shadow->inode = NULL;
+ iput(s_inode);
+ mdi->mi_shadow = NULL;
+ }
}
/**
@@ -507,12 +516,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
struct nilfs_shadow_map *shadow)
{
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+ struct inode *s_inode;
INIT_LIST_HEAD(&shadow->frozen_buffers);
- address_space_init_once(&shadow->frozen_data);
- nilfs_mapping_init(&shadow->frozen_data, inode);
- address_space_init_once(&shadow->frozen_btnodes);
- nilfs_mapping_init(&shadow->frozen_btnodes, inode);
+
+ s_inode = nilfs_iget_for_shadow(inode);
+ if (IS_ERR(s_inode))
+ return PTR_ERR(s_inode);
+
+ shadow->inode = s_inode;
mi->mi_shadow = shadow;
return 0;
}
@@ -526,14 +538,15 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
struct nilfs_inode_info *ii = NILFS_I(inode);
struct nilfs_shadow_map *shadow = mi->mi_shadow;
+ struct inode *s_inode = shadow->inode;
int ret;
- ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
+ ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
if (ret)
goto out;
- ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
- &ii->i_btnode_cache);
+ ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
+ ii->i_assoc_inode->i_mapping);
if (ret)
goto out;
@@ -549,7 +562,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
struct page *page;
int blkbits = inode->i_blkbits;
- page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
+ page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index);
if (!page)
return -ENOMEM;
@@ -581,7 +594,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
struct page *page;
int n;
- page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
+ page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index);
if (page) {
if (page_has_buffers(page)) {
n = bh_offset(bh) >> inode->i_blkbits;
@@ -622,10 +635,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
nilfs_palloc_clear_cache(inode);
nilfs_clear_dirty_pages(inode->i_mapping, true);
- nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
+ nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
- nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
- nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
+ nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
+ nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
+ NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
@@ -640,10 +654,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
{
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
struct nilfs_shadow_map *shadow = mi->mi_shadow;
+ struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
down_write(&mi->mi_sem);
nilfs_release_frozen_buffers(shadow);
- truncate_inode_pages(&shadow->frozen_data, 0);
- truncate_inode_pages(&shadow->frozen_btnodes, 0);
+ truncate_inode_pages(shadow->inode->i_mapping, 0);
+ truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
up_write(&mi->mi_sem);
}
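After this conversion both shadow page caches hang off the single shadow->inode: data pages live in its own mapping, and B-tree node pages in the mapping of the cache-holder inode attached to it. Two accessor sketches (names hypothetical):

    /* Sketch: where the former frozen_data and frozen_btnodes caches now live. */
    static struct address_space *shadow_data_cache(struct nilfs_shadow_map *shadow)
    {
            return shadow->inode->i_mapping;                  /* was frozen_data */
    }

    static struct address_space *shadow_btnode_cache(struct nilfs_shadow_map *shadow)
    {
            /* the cache-holder inode attached to the shadow inode */
            return NILFS_I(shadow->inode)->i_assoc_inode->i_mapping;
    }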
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index 8f86080a436d..9e23bab3ff12 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -18,14 +18,12 @@
/**
* struct nilfs_shadow_map - shadow mapping of meta data file
* @bmap_store: shadow copy of bmap state
- * @frozen_data: shadowed dirty data pages
- * @frozen_btnodes: shadowed dirty b-tree nodes' pages
+ * @inode: holder of page caches used in shadow mapping
* @frozen_buffers: list of frozen buffers
*/
struct nilfs_shadow_map {
struct nilfs_bmap_store bmap_store;
- struct address_space frozen_data;
- struct address_space frozen_btnodes;
+ struct inode *inode;
struct list_head frozen_buffers;
};
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index a7b81755c350..1344f7d475d3 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -28,7 +28,7 @@
* @i_xattr: <TODO>
* @i_dir_start_lookup: page index of last successful search
* @i_cno: checkpoint number for GC inode
- * @i_btnode_cache: cached pages of b-tree nodes
+ * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer)
* @i_dirty: list for connecting dirty files
* @xattr_sem: semaphore for extended attributes processing
* @i_bh: buffer contains disk inode
@@ -43,7 +43,7 @@ struct nilfs_inode_info {
__u64 i_xattr; /* sector_t ??? */
__u32 i_dir_start_lookup;
__u64 i_cno; /* check point number for GC inode */
- struct address_space i_btnode_cache;
+ struct inode *i_assoc_inode;
struct list_head i_dirty; /* List for connecting dirty files */
#ifdef CONFIG_NILFS_XATTR
@@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap)
return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
}
-static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
-{
- struct nilfs_inode_info *ii =
- container_of(btnc, struct nilfs_inode_info, i_btnode_cache);
- return &ii->vfs_inode;
-}
-
/*
* Dynamic state flags of NILFS on-memory inode (i_state)
*/
@@ -98,6 +91,8 @@ enum {
NILFS_I_INODE_SYNC, /* dsync is not allowed for inode */
NILFS_I_BMAP, /* has bmap and btnode_cache */
NILFS_I_GCINODE, /* inode for GC, on memory only */
+ NILFS_I_BTNC, /* inode for btree node cache */
+ NILFS_I_SHADOW, /* inode for shadowed page cache */
};
/*
@@ -267,6 +262,9 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
unsigned long ino);
extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
unsigned long ino, __u64 cno);
+int nilfs_attach_btree_node_cache(struct inode *inode);
+void nilfs_detach_btree_node_cache(struct inode *inode);
+struct inode *nilfs_iget_for_shadow(struct inode *inode);
extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
extern void nilfs_truncate(struct inode *);
extern void nilfs_evict_inode(struct inode *);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 063dd16d75b5..a8e88cc38e16 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -436,22 +436,12 @@ unsigned int nilfs_page_count_clean_buffers(struct page *page,
return nc;
}
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
-{
- mapping->host = inode;
- mapping->flags = 0;
- mapping_set_gfp_mask(mapping, GFP_NOFS);
- mapping->private_data = NULL;
- mapping->a_ops = &empty_aops;
-}
-
/*
* NILFS2 needs clear_page_dirty() in the following two cases:
*
- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
- * page dirty flags when it copies back pages from the shadow cache
- * (gcdat->{i_mapping,i_btnode_cache}) to its original cache
- * (dat->{i_mapping,i_btnode_cache}).
+ * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
+ * flag of pages when it copies back pages from shadow cache to the
+ * original cache.
*
* 2) Some B-tree operations like insertion or deletion may dispose buffers
* in dirty state, and this needs to cancel the dirty state of their pages.
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 569263b23c0c..21ddcdd4d63e 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -43,7 +43,6 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_dirty_page(struct page *, bool);
void nilfs_clear_dirty_pages(struct address_space *, bool);
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode);
unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
unsigned int);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 85a853334771..0afe0832c754 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -733,15 +733,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
struct list_head *listp)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
- struct address_space *mapping = &ii->i_btnode_cache;
+ struct inode *btnc_inode = ii->i_assoc_inode;
struct pagevec pvec;
struct buffer_head *bh, *head;
unsigned int i;
pgoff_t index = 0;
+ if (!btnc_inode)
+ return;
+
pagevec_init(&pvec);
- while (pagevec_lookup_tag(&pvec, mapping, &index,
+ while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
PAGECACHE_TAG_DIRTY)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
bh = head = page_buffers(pvec.pages[i]);
@@ -2410,7 +2413,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
continue;
list_del_init(&ii->i_dirty);
truncate_inode_pages(&ii->vfs_inode.i_data, 0);
- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
iput(&ii->vfs_inode);
}
}
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 3e05c98631ec..ba108f915391 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -157,7 +157,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
ii->i_bh = NULL;
ii->i_state = 0;
ii->i_cno = 0;
- nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
+ ii->i_assoc_inode = NULL;
+ ii->i_bmap = &ii->i_bmap_data;
return &ii->vfs_inode;
}
@@ -1377,8 +1378,6 @@ static void nilfs_inode_init_once(void *obj)
#ifdef CONFIG_NILFS_XATTR
init_rwsem(&ii->xattr_sem);
#endif
- address_space_init_once(&ii->i_btnode_cache);
- ii->i_bmap = &ii->i_bmap_data;
inode_init_once(&ii->vfs_inode);
}
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index d154dcfe06af..90e3dad8ee45 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1746,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
set_buffer_dirty(bh);
} while ((bh = bh->b_this_page) != head);
spin_unlock(&mapping->private_lock);
- block_dirty_folio(mapping, page_folio(page));
+ filemap_dirty_folio(mapping, page_folio(page));
if (unlikely(buffers_to_free)) {
do {
bh = buffers_to_free->b_this_page;
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 273f65e0aaba..0b6f551a342a 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -337,7 +337,6 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
- struct inode *gqinode = NULL;
unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
GROUP_QUOTA_SYSTEM_INODE };
struct ocfs2_global_disk_dqinfo dinfo;
@@ -346,29 +345,31 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
u64 pcount;
int status;
+ oinfo->dqi_gi.dqi_sb = sb;
+ oinfo->dqi_gi.dqi_type = type;
+ ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
+ oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
+ oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
+ oinfo->dqi_gqi_bh = NULL;
+ oinfo->dqi_gqi_count = 0;
+
/* Read global header */
- gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
+ oinfo->dqi_gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
OCFS2_INVALID_SLOT);
- if (!gqinode) {
+ if (!oinfo->dqi_gqinode) {
mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
type);
status = -EINVAL;
goto out_err;
}
- oinfo->dqi_gi.dqi_sb = sb;
- oinfo->dqi_gi.dqi_type = type;
- oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
- oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
- oinfo->dqi_gqi_bh = NULL;
- oinfo->dqi_gqi_count = 0;
- oinfo->dqi_gqinode = gqinode;
+
status = ocfs2_lock_global_qf(oinfo, 0);
if (status < 0) {
mlog_errno(status);
goto out_err;
}
- status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
+ status = ocfs2_extent_map_get_blocks(oinfo->dqi_gqinode, 0, &oinfo->dqi_giblk,
&pcount, NULL);
if (status < 0)
goto out_unlock;
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 0e4b16d4c037..b1a8b046f4c2 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -702,8 +702,6 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
info->dqi_priv = oinfo;
oinfo->dqi_type = type;
INIT_LIST_HEAD(&oinfo->dqi_chunk);
- oinfo->dqi_gqinode = NULL;
- ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
oinfo->dqi_rec = NULL;
oinfo->dqi_lqi_bh = NULL;
oinfo->dqi_libh = NULL;
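The ocfs2 hunks move the lock and field initialization of the global quota info ahead of the first operation that can fail, so the shared teardown path always runs against fully initialized state. The ordering pattern in general form (all names hypothetical):

    struct obj;
    struct obj *acquire_obj(void);

    struct ctx {
            struct obj *obj;
            int count;
    };

    /* Sketch: set cleanup-safe defaults before the first failure point, so a
     * single error path can tear the context down unconditionally. */
    static int setup_ctx(struct ctx *c)
    {
            c->obj = NULL;
            c->count = 0;

            c->obj = acquire_obj();         /* first operation that can fail */
            if (!c->obj)
                    return -EINVAL;
            return 0;
    }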
diff --git a/fs/read_write.c b/fs/read_write.c
index dc5000173b80..e643aec2b0ef 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1630,7 +1630,6 @@ int generic_write_checks_count(struct kiocb *iocb, loff_t *count)
if (!*count)
return 0;
- /* FIXME: this is for backwards compatibility with 2.4 */
if (iocb->ki_flags & IOCB_APPEND)
iocb->ki_pos = i_size_read(inode);
diff --git a/fs/seq_file.c b/fs/seq_file.c
index f8e1f4ee87ff..7ab8a58c29b6 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -554,9 +554,9 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc)
}
EXPORT_SYMBOL(seq_dentry);
-static void *single_start(struct seq_file *p, loff_t *pos)
+void *single_start(struct seq_file *p, loff_t *pos)
{
- return NULL + (*pos == 0);
+ return *pos ? NULL : SEQ_START_TOKEN;
}
static void *single_next(struct seq_file *p, void *v, loff_t *pos)
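The old body relied on `NULL + (*pos == 0)`, pointer arithmetic on NULL, which is undefined behaviour and triggers clang's -Wnull-pointer-arithmetic; the new body yields the same two values through seq_file's documented marker, SEQ_START_TOKEN (defined as ((void *)1) in <linux/seq_file.h>). For context, a sketch of the general pattern the token supports:

    /* Sketch: a seq_file ->show() that prints a header exactly once, when it
     * is handed the start-of-sequence token. */
    static int example_show(struct seq_file *m, void *v)
    {
            if (v == SEQ_START_TOKEN) {
                    seq_puts(m, "name  count\n");   /* header line */
                    return 0;
            }
            /* format one real record for any other v */
            return 0;
    }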
diff --git a/fs/smbfs_common/smb2pdu.h b/fs/smbfs_common/smb2pdu.h
index 38b8fc514860..0507aecfc669 100644
--- a/fs/smbfs_common/smb2pdu.h
+++ b/fs/smbfs_common/smb2pdu.h
@@ -61,6 +61,40 @@
#define NUMBER_OF_SMB2_COMMANDS 0x0013
/*
+ * Size of the session key (crypto key encrypted with the password)
+ */
+#define SMB2_NTLMV2_SESSKEY_SIZE 16
+#define SMB2_SIGNATURE_SIZE 16
+#define SMB2_HMACSHA256_SIZE 32
+#define SMB2_CMACAES_SIZE 16
+#define SMB3_GCM128_CRYPTKEY_SIZE 16
+#define SMB3_GCM256_CRYPTKEY_SIZE 32
+
+/*
+ * Size of the smb3 encryption/decryption keys
+ * This size is big enough to store any cipher key types.
+ */
+#define SMB3_ENC_DEC_KEY_SIZE 32
+
+/*
+ * Size of the smb3 signing key
+ */
+#define SMB3_SIGN_KEY_SIZE 16
+
+#define CIFS_CLIENT_CHALLENGE_SIZE 8
+
+/* Maximum buffer size value we can send with 1 credit */
+#define SMB2_MAX_BUFFER_SIZE 65536
+
+/*
+ * The default wsize is 1M for SMB2 (and for some CIFS cases).
+ * find_get_pages seems to return a maximum of 256
+ * pages in a single call. With PAGE_SIZE == 4k, this means we can
+ * fill a single wsize request with a single call.
+ */
+#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
+
+/*
* SMB2 Header Definition
*
* "MBZ" : Must be Zero
@@ -88,6 +122,15 @@
#define SMB2_FLAGS_DFS_OPERATIONS cpu_to_le32(0x10000000)
#define SMB2_FLAGS_REPLAY_OPERATION cpu_to_le32(0x20000000) /* SMB3 & up */
+/*
+ * Definitions for SMB2 Protocol Data Units (network frames)
+ *
+ * See MS-SMB2.PDF specification for protocol details.
+ * The naming convention for the structs is the lower-case version of the
+ * SMB2 command code name. Note that structures must be packed.
+ *
+ */
+
/* See MS-SMB2 section 2.2.1 */
struct smb2_hdr {
__le32 ProtocolId; /* 0xFE 'S' 'M' 'B' */
@@ -115,6 +158,18 @@ struct smb2_pdu {
__le16 StructureSize2; /* size of wct area (varies, request specific) */
} __packed;
+#define SMB2_ERROR_STRUCTURE_SIZE2 9
+#define SMB2_ERROR_STRUCTURE_SIZE2_LE cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2)
+
+struct smb2_err_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize;
+ __u8 ErrorContextCount;
+ __u8 Reserved;
+ __le32 ByteCount; /* even if zero, at least one byte follows */
+ __u8 ErrorData[1]; /* variable length */
+} __packed;
+
#define SMB3_AES_CCM_NONCE 11
#define SMB3_AES_GCM_NONCE 12
@@ -608,8 +663,8 @@ struct smb2_close_req {
__le16 StructureSize; /* Must be 24 */
__le16 Flags;
__le32 Reserved;
- __le64 PersistentFileId; /* opaque endianness */
- __le64 VolatileFileId; /* opaque endianness */
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
} __packed;
/*
@@ -653,8 +708,8 @@ struct smb2_read_req {
__u8 Flags; /* MBZ unless SMB3.02 or later */
__le32 Length;
__le64 Offset;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
__le32 MinimumCount;
__le32 Channel; /* MBZ except for SMB3 or later */
__le32 RemainingBytes;
@@ -692,8 +747,8 @@ struct smb2_write_req {
__le16 DataOffset; /* offset from start of SMB2 header to write data */
__le32 Length;
__le64 Offset;
- __le64 PersistentFileId; /* opaque endianness */
- __le64 VolatileFileId; /* opaque endianness */
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
__le32 Channel; /* MBZ unless SMB3.02 or later */
__le32 RemainingBytes;
__le16 WriteChannelInfoOffset;
@@ -722,8 +777,8 @@ struct smb2_flush_req {
__le16 StructureSize; /* Must be 24 */
__le16 Reserved1;
__le32 Reserved2;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
} __packed;
struct smb2_flush_rsp {
@@ -732,6 +787,123 @@ struct smb2_flush_rsp {
__le16 Reserved;
} __packed;
+#define SMB2_LOCKFLAG_SHARED 0x0001
+#define SMB2_LOCKFLAG_EXCLUSIVE 0x0002
+#define SMB2_LOCKFLAG_UNLOCK 0x0004
+#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
+#define SMB2_LOCKFLAG_MASK 0x0007
+
+struct smb2_lock_element {
+ __le64 Offset;
+ __le64 Length;
+ __le32 Flags;
+ __le32 Reserved;
+} __packed;
+
+struct smb2_lock_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 48 */
+ __le16 LockCount;
+ /*
+ * The least significant four bits are the index, the other 28 bits are
+ * the lock sequence number (0 to 64). See MS-SMB2 2.2.26
+ */
+ __le32 LockSequenceNumber;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
+ /* Followed by at least one */
+ struct smb2_lock_element locks[1];
+} __packed;
+
+struct smb2_lock_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __le16 Reserved;
+} __packed;
+
+struct smb2_echo_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __u16 Reserved;
+} __packed;
+
+struct smb2_echo_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __u16 Reserved;
+} __packed;
+
+/*
+ * Valid FileInformation classes for query directory
+ *
+ * Note that these are a subset of the (file) QUERY_INFO levels defined
+ * later in this file (but since QUERY_DIRECTORY uses equivalent numbers
+ * we do not redefine them here)
+ *
+ * FileDirectoryInformation 0x01
+ * FileFullDirectoryInformation 0x02
+ * FileIdFullDirectoryInformation 0x26
+ * FileBothDirectoryInformation 0x03
+ * FileIdBothDirectoryInformation 0x25
+ * FileNamesInformation 0x0C
+ * FileIdExtdDirectoryInformation 0x3C
+ */
+
+/* search (query_directory) Flags field */
+#define SMB2_RESTART_SCANS 0x01
+#define SMB2_RETURN_SINGLE_ENTRY 0x02
+#define SMB2_INDEX_SPECIFIED 0x04
+#define SMB2_REOPEN 0x10
+
+struct smb2_query_directory_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 33 */
+ __u8 FileInformationClass;
+ __u8 Flags;
+ __le32 FileIndex;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
+ __le16 FileNameOffset;
+ __le16 FileNameLength;
+ __le32 OutputBufferLength;
+ __u8 Buffer[1];
+} __packed;
+
+struct smb2_query_directory_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 9 */
+ __le16 OutputBufferOffset;
+ __le32 OutputBufferLength;
+ __u8 Buffer[1];
+} __packed;
+
+/*
+ * Maximum number of iovs we need for a set-info request.
+ * The largest one is rename/hardlink
+ * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
+ * [1] : path
+ * [2] : compound padding
+ */
+#define SMB2_SET_INFO_IOV_SIZE 3
+
+struct smb2_set_info_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 33 */
+ __u8 InfoType;
+ __u8 FileInfoClass;
+ __le32 BufferLength;
+ __le16 BufferOffset;
+ __u16 Reserved;
+ __le32 AdditionalInformation;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
+ __u8 Buffer[1];
+} __packed;
+
+struct smb2_set_info_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 2 */
+} __packed;
/*
* SMB2_NOTIFY See MS-SMB2 section 2.2.35
@@ -769,8 +941,8 @@ struct smb2_change_notify_req {
__le16 StructureSize;
__le16 Flags;
__le32 OutputBufferLength;
- __le64 PersistentFileId; /* opaque endianness */
- __le64 VolatileFileId; /* opaque endianness */
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
__le32 CompletionFilter;
__u32 Reserved;
} __packed;
@@ -978,12 +1150,455 @@ struct smb2_create_rsp {
__le64 EndofFile;
__le32 FileAttributes;
__le32 Reserved2;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
__le32 CreateContextsOffset;
__le32 CreateContextsLength;
__u8 Buffer[1];
} __packed;
+struct create_posix {
+ struct create_context ccontext;
+ __u8 Name[16];
+ __le32 Mode;
+ __u32 Reserved;
+} __packed;
+
+#define SMB2_LEASE_NONE_LE cpu_to_le32(0x00)
+#define SMB2_LEASE_READ_CACHING_LE cpu_to_le32(0x01)
+#define SMB2_LEASE_HANDLE_CACHING_LE cpu_to_le32(0x02)
+#define SMB2_LEASE_WRITE_CACHING_LE cpu_to_le32(0x04)
+
+#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE cpu_to_le32(0x02)
+
+#define SMB2_LEASE_KEY_SIZE 16
+
+struct lease_context {
+ __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
+ __le32 LeaseState;
+ __le32 LeaseFlags;
+ __le64 LeaseDuration;
+} __packed;
+
+struct lease_context_v2 {
+ __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
+ __le32 LeaseState;
+ __le32 LeaseFlags;
+ __le64 LeaseDuration;
+ __u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE];
+ __le16 Epoch;
+ __le16 Reserved;
+} __packed;
+
+struct create_lease {
+ struct create_context ccontext;
+ __u8 Name[8];
+ struct lease_context lcontext;
+} __packed;
+
+struct create_lease_v2 {
+ struct create_context ccontext;
+ __u8 Name[8];
+ struct lease_context_v2 lcontext;
+ __u8 Pad[4];
+} __packed;
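The *_LE lease constants above are stored pre-swapped with cpu_to_le32() so wire-format fields can be masked and compared in place, without byte-swapping on every access. A sketch (helper name hypothetical):

    /* Sketch: test an on-the-wire lease state against a pre-swapped constant. */
    static bool lease_grants_write_caching(const struct lease_context *lc)
    {
            return (lc->LeaseState & SMB2_LEASE_WRITE_CACHING_LE) != 0;
    }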
+
+/* See MS-SMB2 2.2.31 and 2.2.32 */
+struct smb2_ioctl_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 57 */
+ __le16 Reserved; /* MBZ */
+ __le32 CtlCode;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
+ __le32 InputOffset; /* offset from start of SMB2 header to input buffer */
+ __le32 InputCount;
+ __le32 MaxInputResponse;
+ __le32 OutputOffset;
+ __le32 OutputCount;
+ __le32 MaxOutputResponse;
+ __le32 Flags;
+ __le32 Reserved2;
+ __u8 Buffer[];
+} __packed;
+
+struct smb2_ioctl_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 49 */
+ __le16 Reserved;
+ __le32 CtlCode;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
+ __le32 InputOffset; /* offset from start of SMB2 header to input buffer */
+ __le32 InputCount;
+ __le32 OutputOffset;
+ __le32 OutputCount;
+ __le32 Flags;
+ __le32 Reserved2;
+ __u8 Buffer[];
+} __packed;
+
+/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
+struct file_zero_data_information {
+ __le64 FileOffset;
+ __le64 BeyondFinalZero;
+} __packed;
+
+/* Reparse structures - see MS-FSCC 2.1.2 */
+
+/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
+struct reparse_data_buffer {
+ __le32 ReparseTag;
+ __le16 ReparseDataLength;
+ __u16 Reserved;
+ __u8 DataBuffer[]; /* Variable Length */
+} __packed;
+
+struct reparse_guid_data_buffer {
+ __le32 ReparseTag;
+ __le16 ReparseDataLength;
+ __u16 Reserved;
+ __u8 ReparseGuid[16];
+ __u8 DataBuffer[]; /* Variable Length */
+} __packed;
+
+struct reparse_mount_point_data_buffer {
+ __le32 ReparseTag;
+ __le16 ReparseDataLength;
+ __u16 Reserved;
+ __le16 SubstituteNameOffset;
+ __le16 SubstituteNameLength;
+ __le16 PrintNameOffset;
+ __le16 PrintNameLength;
+ __u8 PathBuffer[]; /* Variable Length */
+} __packed;
+
+#define SYMLINK_FLAG_RELATIVE 0x00000001
+
+struct reparse_symlink_data_buffer {
+ __le32 ReparseTag;
+ __le16 ReparseDataLength;
+ __u16 Reserved;
+ __le16 SubstituteNameOffset;
+ __le16 SubstituteNameLength;
+ __le16 PrintNameOffset;
+ __le16 PrintNameLength;
+ __le32 Flags;
+ __u8 PathBuffer[]; /* Variable Length */
+} __packed;
+
+/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
+
+struct validate_negotiate_info_req {
+ __le32 Capabilities;
+ __u8 Guid[SMB2_CLIENT_GUID_SIZE];
+ __le16 SecurityMode;
+ __le16 DialectCount;
+ __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
+} __packed;
+
+struct validate_negotiate_info_rsp {
+ __le32 Capabilities;
+ __u8 Guid[SMB2_CLIENT_GUID_SIZE];
+ __le16 SecurityMode;
+ __le16 Dialect; /* Dialect in use for the connection */
+} __packed;
+
+struct duplicate_extents_to_file {
+ __u64 PersistentFileHandle; /* source file handle, opaque endianness */
+ __u64 VolatileFileHandle;
+ __le64 SourceFileOffset;
+ __le64 TargetFileOffset;
+ __le64 ByteCount; /* Bytes to be copied */
+} __packed;
+
+/* Possible InfoType values */
+#define SMB2_O_INFO_FILE 0x01
+#define SMB2_O_INFO_FILESYSTEM 0x02
+#define SMB2_O_INFO_SECURITY 0x03
+#define SMB2_O_INFO_QUOTA 0x04
+
+/* SMB2 Query Info see MS-SMB2 (2.2.37) or MS-DTYP */
+
+/* List of QUERY INFO levels (those also valid for QUERY_DIR are noted below) */
+#define FILE_DIRECTORY_INFORMATION 1 /* also for QUERY_DIR */
+#define FILE_FULL_DIRECTORY_INFORMATION 2 /* also for QUERY_DIR */
+#define FILE_BOTH_DIRECTORY_INFORMATION 3 /* also for QUERY_DIR */
+#define FILE_BASIC_INFORMATION 4
+#define FILE_STANDARD_INFORMATION 5
+#define FILE_INTERNAL_INFORMATION 6
+#define FILE_EA_INFORMATION 7
+#define FILE_ACCESS_INFORMATION 8
+#define FILE_NAME_INFORMATION 9
+#define FILE_RENAME_INFORMATION 10
+#define FILE_LINK_INFORMATION 11
+#define FILE_NAMES_INFORMATION 12 /* also for QUERY_DIR */
+#define FILE_DISPOSITION_INFORMATION 13
+#define FILE_POSITION_INFORMATION 14
+#define FILE_FULL_EA_INFORMATION 15
+#define FILE_MODE_INFORMATION 16
+#define FILE_ALIGNMENT_INFORMATION 17
+#define FILE_ALL_INFORMATION 18
+#define FILE_ALLOCATION_INFORMATION 19
+#define FILE_END_OF_FILE_INFORMATION 20
+#define FILE_ALTERNATE_NAME_INFORMATION 21
+#define FILE_STREAM_INFORMATION 22
+#define FILE_PIPE_INFORMATION 23
+#define FILE_PIPE_LOCAL_INFORMATION 24
+#define FILE_PIPE_REMOTE_INFORMATION 25
+#define FILE_MAILSLOT_QUERY_INFORMATION 26
+#define FILE_MAILSLOT_SET_INFORMATION 27
+#define FILE_COMPRESSION_INFORMATION 28
+#define FILE_OBJECT_ID_INFORMATION 29
+/* Number 30 not defined in documents */
+#define FILE_MOVE_CLUSTER_INFORMATION 31
+#define FILE_QUOTA_INFORMATION 32
+#define FILE_REPARSE_POINT_INFORMATION 33
+#define FILE_NETWORK_OPEN_INFORMATION 34
+#define FILE_ATTRIBUTE_TAG_INFORMATION 35
+#define FILE_TRACKING_INFORMATION 36
+#define FILEID_BOTH_DIRECTORY_INFORMATION 37 /* also for QUERY_DIR */
+#define FILEID_FULL_DIRECTORY_INFORMATION 38 /* also for QUERY_DIR */
+#define FILE_VALID_DATA_LENGTH_INFORMATION 39
+#define FILE_SHORT_NAME_INFORMATION 40
+#define FILE_SFIO_RESERVE_INFORMATION 44
+#define FILE_SFIO_VOLUME_INFORMATION 45
+#define FILE_HARD_LINK_INFORMATION 46
+#define FILE_NORMALIZED_NAME_INFORMATION 48
+#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
+#define FILE_STANDARD_LINK_INFORMATION 54
+#define FILE_ID_INFORMATION 59
+#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60 /* also for QUERY_DIR */
+/* Used for Query Info and Find File POSIX Info for SMB3.1.1 and SMB1 */
+#define SMB_FIND_FILE_POSIX_INFO 0x064
+
+/* Security info type additionalinfo flags. */
+#define OWNER_SECINFO 0x00000001
+#define GROUP_SECINFO 0x00000002
+#define DACL_SECINFO 0x00000004
+#define SACL_SECINFO 0x00000008
+#define LABEL_SECINFO 0x00000010
+#define ATTRIBUTE_SECINFO 0x00000020
+#define SCOPE_SECINFO 0x00000040
+#define BACKUP_SECINFO 0x00010000
+#define UNPROTECTED_SACL_SECINFO 0x10000000
+#define UNPROTECTED_DACL_SECINFO 0x20000000
+#define PROTECTED_SACL_SECINFO 0x40000000
+#define PROTECTED_DACL_SECINFO 0x80000000
+
+/* Flags used for FileFullEAinfo */
+#define SL_RESTART_SCAN 0x00000001
+#define SL_RETURN_SINGLE_ENTRY 0x00000002
+#define SL_INDEX_SPECIFIED 0x00000004
+
+struct smb2_query_info_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 41 */
+ __u8 InfoType;
+ __u8 FileInfoClass;
+ __le32 OutputBufferLength;
+ __le16 InputBufferOffset;
+ __u16 Reserved;
+ __le32 InputBufferLength;
+ __le32 AdditionalInformation;
+ __le32 Flags;
+ __u64 PersistentFileId;
+ __u64 VolatileFileId;
+ __u8 Buffer[1];
+} __packed;
+
+struct smb2_query_info_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 9 */
+ __le16 OutputBufferOffset;
+ __le32 OutputBufferLength;
+ __u8 Buffer[1];
+} __packed;
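Putting the InfoType values, the info levels, and the AdditionalInformation flags together: a hypothetical FILE_ALL_INFORMATION query (req, the fids, and output_len are illustrative, not from the patch) would set:

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = FILE_ALL_INFORMATION;	/* level 18 */
	req->OutputBufferLength = cpu_to_le32(output_len);
	req->AdditionalInformation = 0;	/* only meaningful for SMB2_O_INFO_SECURITY */
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	/* A security descriptor query instead combines the SECINFO bits: */
	req->InfoType = SMB2_O_INFO_SECURITY;
	req->AdditionalInformation = cpu_to_le32(OWNER_SECINFO | DACL_SECINFO);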
+
+/*
+ * PDU query infolevel structure definitions
+ */
+
+struct file_allocated_range_buffer {
+ __le64 file_offset;
+ __le64 length;
+} __packed;
+
+struct smb2_file_internal_info {
+ __le64 IndexNumber;
+} __packed; /* level 6 Query */
+
+struct smb2_file_rename_info { /* encoding of request for level 10 */
+ __u8 ReplaceIfExists; /* 1 = replace existing target with new */
+ /* 0 = fail if target already exists */
+ __u8 Reserved[7];
+ __u64 RootDirectory; /* MBZ for network operations (the spec doesn't say why) */
+ __le32 FileNameLength;
+ char FileName[]; /* New name to be assigned */
+ /* padding - overall struct size must be >= 24 so filename + pad >= 6 */
+} __packed; /* level 10 Set */
+
+struct smb2_file_link_info { /* encoding of request for level 11 */
+ __u8 ReplaceIfExists; /* 1 = replace existing link with new */
+ /* 0 = fail if link already exists */
+ __u8 Reserved[7];
+ __u64 RootDirectory; /* MBZ for network operations (the spec doesn't say why) */
+ __le32 FileNameLength;
+ char FileName[]; /* Name to be assigned to new link */
+} __packed; /* level 11 Set */
+
+/*
+ * Although this level 18 struct shares its name with the cifs level 0x107
+ * struct, the two are different: level 0x107 has an extra u64 between
+ * AccessFlags and CurrentByteOffset.
+ */
+struct smb2_file_all_info { /* data block encoding of response to level 18 */
+ __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le32 Attributes;
+ __u32 Pad1; /* End of FILE_BASIC_INFO equivalent */
+ __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
+ __le64 EndOfFile; /* size, i.e. offset of first free byte in file */
+ __le32 NumberOfLinks; /* hard links */
+ __u8 DeletePending;
+ __u8 Directory;
+ __u16 Pad2; /* End of FILE_STANDARD_INFO equivalent */
+ __le64 IndexNumber;
+ __le32 EASize;
+ __le32 AccessFlags;
+ __le64 CurrentByteOffset;
+ __le32 Mode;
+ __le32 AlignmentRequirement;
+ __le32 FileNameLength;
+ char FileName[1];
+} __packed; /* level 18 Query */
+
+struct smb2_file_eof_info { /* encoding of request for level 20 */
+ __le64 EndOfFile; /* new end of file value */
+} __packed; /* level 20 Set */
+
+/* Level 100 query info */
+struct smb311_posix_qinfo {
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le64 EndOfFile;
+ __le64 AllocationSize;
+ __le32 DosAttributes;
+ __le64 Inode;
+ __le32 DeviceId;
+ __le32 Zero;
+ /* beginning of POSIX Create Context Response */
+ __le32 HardLinks;
+ __le32 ReparseTag;
+ __le32 Mode;
+ u8 Sids[];
+ /*
+ * var sized owner SID
+ * var sized group SID
+ * le32 filenamelength
+ * u8 filename[]
+ */
+} __packed;
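Because the owner and group SIDs are variable-length, the trailing fields can only be reached by walking them in order. A minimal sketch, assuming a sid_size() helper that returns the encoded length of a SID (no such helper is provided by this patch):

	const u8 *p = qinfo->Sids;
	u32 name_len;

	p += sid_size(p);			/* skip variable-sized owner SID */
	p += sid_size(p);			/* skip variable-sized group SID */
	name_len = le32_to_cpup((const __le32 *)p);
	p += sizeof(__le32);			/* p now points at the UTF-16LE name */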
+
+/* File System Information Classes */
+#define FS_VOLUME_INFORMATION 1 /* Query */
+#define FS_LABEL_INFORMATION 2 /* Set */
+#define FS_SIZE_INFORMATION 3 /* Query */
+#define FS_DEVICE_INFORMATION 4 /* Query */
+#define FS_ATTRIBUTE_INFORMATION 5 /* Query */
+#define FS_CONTROL_INFORMATION 6 /* Query, Set */
+#define FS_FULL_SIZE_INFORMATION 7 /* Query */
+#define FS_OBJECT_ID_INFORMATION 8 /* Query, Set */
+#define FS_DRIVER_PATH_INFORMATION 9 /* Query */
+#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */
+#define FS_POSIX_INFORMATION 100 /* SMB3.1.1 POSIX. Query */
+
+struct smb2_fs_full_size_info {
+ __le64 TotalAllocationUnits;
+ __le64 CallerAvailableAllocationUnits;
+ __le64 ActualAvailableAllocationUnits;
+ __le32 SectorsPerAllocationUnit;
+ __le32 BytesPerSector;
+} __packed;
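These fields combine into byte counts the way statfs consumers expect; for example (illustrative only, with the cast guarding against 32-bit overflow):

	u64 unit = (u64)le32_to_cpu(info->SectorsPerAllocationUnit) *
		   le32_to_cpu(info->BytesPerSector);
	u64 total_bytes = le64_to_cpu(info->TotalAllocationUnits) * unit;
	u64 avail_bytes = le64_to_cpu(info->CallerAvailableAllocationUnits) * unit;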
+
+#define SSINFO_FLAGS_ALIGNED_DEVICE 0x00000001
+#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
+#define SSINFO_FLAGS_NO_SEEK_PENALTY 0x00000004
+#define SSINFO_FLAGS_TRIM_ENABLED 0x00000008
+
+/* sector size info struct */
+struct smb3_fs_ss_info {
+ __le32 LogicalBytesPerSector;
+ __le32 PhysicalBytesPerSectorForAtomicity;
+ __le32 PhysicalBytesPerSectorForPerf;
+ __le32 FSEffPhysicalBytesPerSectorForAtomicity;
+ __le32 Flags;
+ __le32 ByteOffsetForSectorAlignment;
+ __le32 ByteOffsetForPartitionAlignment;
+} __packed;
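Callers typically test Flags against the SSINFO bits defined above, e.g. to detect flash storage (a sketch, not from the patch):

	u32 flags = le32_to_cpu(ss_info->Flags);
	bool no_seek_penalty = flags & SSINFO_FLAGS_NO_SEEK_PENALTY;
	bool trim_enabled = flags & SSINFO_FLAGS_TRIM_ENABLED;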
+
+/* File System Control Information */
+struct smb2_fs_control_info {
+ __le64 FreeSpaceStartFiltering;
+ __le64 FreeSpaceThreshold;
+ __le64 FreeSpaceStopFiltering;
+ __le64 DefaultQuotaThreshold;
+ __le64 DefaultQuotaLimit;
+ __le32 FileSystemControlFlags;
+ __le32 Padding;
+} __packed;
+
+/* volume info struct - see MS-FSCC 2.5.9 */
+#define MAX_VOL_LABEL_LEN 32
+struct smb3_fs_vol_info {
+ __le64 VolumeCreationTime;
+ __u32 VolumeSerialNumber;
+ __le32 VolumeLabelLength; /* includes trailing null */
+ __u8 SupportsObjects; /* True if volume supports objects (e.g. NTFS) */
+ __u8 Reserved;
+ __u8 VolumeLabel[]; /* variable len */
+} __packed;
+
+/* See MS-SMB2 2.2.23 through 2.2.25 */
+struct smb2_oplock_break {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 24 */
+ __u8 OplockLevel;
+ __u8 Reserved;
+ __le32 Reserved2;
+ __u64 PersistentFid;
+ __u64 VolatileFid;
+} __packed;
+
+#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
+
+struct smb2_lease_break {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 44 */
+ __le16 Epoch;
+ __le32 Flags;
+ __u8 LeaseKey[16];
+ __le32 CurrentLeaseState;
+ __le32 NewLeaseState;
+ __le32 BreakReason;
+ __le32 AccessMaskHint;
+ __le32 ShareMaskHint;
+} __packed;
+
+struct smb2_lease_ack {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 36 */
+ __le16 Reserved;
+ __le32 Flags;
+ __u8 LeaseKey[16];
+ __le32 LeaseState;
+ __le64 LeaseDuration;
+} __packed;
+#define OP_BREAK_STRUCT_SIZE_20 24
+#define OP_BREAK_STRUCT_SIZE_21 36
#endif /* _COMMON_SMB2PDU_H */
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index 0adb970f4e73..14e2fb49cff5 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Data verification functions, i.e. hooks for ->readpages()
+ * Data verification functions, i.e. hooks for ->readahead()
*
* Copyright 2019 Google LLC
*/
@@ -214,7 +214,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page);
* that fail verification are set to the Error state. Verification is skipped
* for pages already in the Error state, e.g. due to fscrypt decryption failure.
*
- * This is a helper function for use by the ->readpages() method of filesystems
+ * This is a helper function for use by the ->readahead() method of filesystems
* that issue bios to read data directly into the page cache. Filesystems that
* populate the page cache without issuing bios (e.g. non block-based
* filesystems) must instead call fsverity_verify_page() directly on each page.
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 353e53b892e6..b52ed339727f 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -82,6 +82,24 @@ xfs_prealloc_blocks(
}
/*
+ * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
+ * guarantee that we can refill the AGFL prior to allocating space in a nearly
+ * full AG. Although the space described by the free space btrees, the
+ * blocks used by the freesp btrees themselves, and the blocks owned by the
+ * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
+ * free space in the AG drop so low that the free space btrees cannot refill an
+ * empty AGFL up to the minimum level. Rather than grind through empty AGs
+ * until the fs goes down, we subtract this many AG blocks from the incore
+ * fdblocks to ensure user allocation does not overcommit the space the
+ * filesystem needs for the AGFLs. The rmap btree uses a per-AG reservation to
+ * withhold space from xfs_mod_fdblocks, so we do not account for that here.
+ */
+#define XFS_ALLOCBT_AGFL_RESERVE 4
+
+/*
+ * Compute the number of blocks that we set aside to guarantee the ability to
+ * refill the AGFL and handle a full bmap btree split.
+ *
* In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
* AGF buffer (PV 947395), we place constraints on the relationship among
* actual allocations for data blocks, freelist blocks, and potential file data
@@ -93,14 +111,14 @@ xfs_prealloc_blocks(
* extents need to be actually allocated. To get around this, we explicitly set
* aside a few blocks which will not be reserved in delayed allocation.
*
- * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
- * potential split of the file's bmap btree.
+ * For each AG, we need to reserve enough blocks to replenish a totally empty
+ * AGFL and 4 more to handle a potential split of the file's bmap btree.
*/
unsigned int
xfs_alloc_set_aside(
struct xfs_mount *mp)
{
- return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
+ return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
}
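As a worked example of the formula: with XFS_ALLOCBT_AGFL_RESERVE = 4, a filesystem with 4 AGs sets aside 4 * (4 + 4) = 32 blocks, which are withheld from the incore fdblocks so user allocations can never consume the space needed to refill an AGFL or split the bmap btree.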
/*
@@ -124,12 +142,12 @@ xfs_alloc_ag_max_usable(
unsigned int blocks;
blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
- blocks += XFS_ALLOC_AGFL_RESERVE;
+ blocks += XFS_ALLOCBT_AGFL_RESERVE;
blocks += 3; /* AGF, AGI btree root blocks */
if (xfs_has_finobt(mp))
blocks++; /* finobt root block */
if (xfs_has_rmapbt(mp))
- blocks++; /* rmap root block */
+ blocks++; /* rmap root block */
if (xfs_has_reflink(mp))
blocks++; /* refcount root block */
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 1c14a0b1abea..d4c057b764f9 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -88,7 +88,6 @@ typedef struct xfs_alloc_arg {
#define XFS_ALLOC_NOBUSY (1 << 2)/* Busy extents not allowed */
/* freespace limit calculations */
-#define XFS_ALLOC_AGFL_RESERVE 4
unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c
index 32fa02945f73..ae4345b37621 100644
--- a/fs/xfs/xfs_bio_io.c
+++ b/fs/xfs/xfs_bio_io.c
@@ -9,39 +9,6 @@ static inline unsigned int bio_max_vecs(unsigned int count)
return bio_max_segs(howmany(count, PAGE_SIZE));
}
-static void
-xfs_flush_bdev_async_endio(
- struct bio *bio)
-{
- complete(bio->bi_private);
-}
-
-/*
- * Submit a request for an async cache flush to run. If the request queue does
- * not require flush operations, just skip it altogether. If the caller needs
- * to wait for the flush completion at a later point in time, they must supply a
- * valid completion. This will be signalled when the flush completes. The
- * caller never sees the bio that is issued here.
- */
-void
-xfs_flush_bdev_async(
- struct bio *bio,
- struct block_device *bdev,
- struct completion *done)
-{
- struct request_queue *q = bdev->bd_disk->queue;
-
- if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
- complete(done);
- return;
- }
-
- bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
- bio->bi_private = done;
- bio->bi_end_io = xfs_flush_bdev_async_endio;
-
- submit_bio(bio);
-}
int
xfs_rw_bdev(
struct block_device *bdev,
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 33e26690a8c4..68f74549fa22 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -17,6 +17,7 @@
#include "xfs_fsops.h"
#include "xfs_trans_space.h"
#include "xfs_log.h"
+#include "xfs_log_priv.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trace.h"
@@ -347,7 +348,7 @@ xfs_fs_counts(
cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
- mp->m_alloc_set_aside;
+ xfs_fdblocks_unavailable(mp);
spin_lock(&mp->m_sb_lock);
cnt->freertx = mp->m_sb.sb_frextents;
@@ -430,46 +431,36 @@ xfs_reserve_blocks(
* If the request is larger than the current reservation, reserve the
* blocks before we update the reserve counters. Sample m_fdblocks and
* perform a partial reservation if the request exceeds free space.
+ *
+ * The code below estimates how many blocks it can request from
+ * fdblocks to stash in the reserve pool. This is a classic TOCTOU
+ * race since fdblocks updates are not always coordinated via
+ * m_sb_lock. Set the reserve size even if there's not enough free
+ * space to fill it because mod_fdblocks will refill an undersized
+ * reserve when it can.
*/
- error = -ENOSPC;
- do {
- free = percpu_counter_sum(&mp->m_fdblocks) -
- mp->m_alloc_set_aside;
- if (free <= 0)
- break;
-
- delta = request - mp->m_resblks;
- lcounter = free - delta;
- if (lcounter < 0)
- /* We can't satisfy the request, just get what we can */
- fdblks_delta = free;
- else
- fdblks_delta = delta;
-
+ free = percpu_counter_sum(&mp->m_fdblocks) -
+ xfs_fdblocks_unavailable(mp);
+ delta = request - mp->m_resblks;
+ mp->m_resblks = request;
+ if (delta > 0 && free > 0) {
/*
* We'll either succeed in getting space from the free block
- * count or we'll get an ENOSPC. If we get a ENOSPC, it means
- * things changed while we were calculating fdblks_delta and so
- * we should try again to see if there is anything left to
- * reserve.
+ * count or we'll get an ENOSPC. Don't set the reserved flag
+ * here - we don't want to reserve the extra reserve blocks
+ * from the reserve.
*
- * Don't set the reserved flag here - we don't want to reserve
- * the extra reserve blocks from the reserve.....
+ * The desired reserve size can change after we drop the lock.
+ * Use mod_fdblocks to put the space into the reserve or into
+ * fdblocks as appropriate.
*/
+ fdblks_delta = min(free, delta);
spin_unlock(&mp->m_sb_lock);
error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
+ if (!error)
+ xfs_mod_fdblocks(mp, fdblks_delta, 0);
spin_lock(&mp->m_sb_lock);
- } while (error == -ENOSPC);
-
- /*
- * Update the reserve counters if blocks have been successfully
- * allocated.
- */
- if (!error && fdblks_delta) {
- mp->m_resblks += fdblks_delta;
- mp->m_resblks_avail += fdblks_delta;
}
-
out:
if (outval) {
outval->resblks = mp->m_resblks;
@@ -528,8 +519,11 @@ xfs_do_force_shutdown(
int tag;
const char *why;
- if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate))
+
+ if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) {
+ xlog_shutdown_wait(mp->m_log);
return;
+ }
if (mp->m_sb_bp)
mp->m_sb_bp->b_flags |= XBF_DONE;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 20186c584c7d..bffd6eb0b298 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -883,7 +883,7 @@ xfs_reclaim_inode(
*/
if (xlog_is_shutdown(ip->i_mount->m_log)) {
xfs_iunpin_wait(ip);
- xfs_iflush_abort(ip);
+ xfs_iflush_shutdown_abort(ip);
goto reclaim;
}
if (xfs_ipincount(ip))
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 26227d26f274..9de6205fe134 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3631,7 +3631,7 @@ xfs_iflush_cluster(
/*
* We must use the safe variant here as on shutdown xfs_iflush_abort()
- * can remove itself from the list.
+ * will remove itself from the list.
*/
list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
iip = (struct xfs_inode_log_item *)lip;
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 11158fa81a09..9e6ef55cf29e 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -544,10 +544,17 @@ xfs_inode_item_push(
uint rval = XFS_ITEM_SUCCESS;
int error;
- ASSERT(iip->ili_item.li_buf);
+ if (!bp || (ip->i_flags & XFS_ISTALE)) {
+ /*
+ * Inode item/buffer is being aborted due to cluster
+ * buffer deletion. Trigger a log force to have that operation
+ * completed and items removed from the AIL before the next push
+ * attempt.
+ */
+ return XFS_ITEM_PINNED;
+ }
- if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) ||
- (ip->i_flags & XFS_ISTALE))
+ if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp))
return XFS_ITEM_PINNED;
if (xfs_iflags_test(ip, XFS_IFLUSHING))
@@ -834,46 +841,143 @@ xfs_buf_inode_io_fail(
}
/*
- * This is the inode flushing abort routine. It is called when
- * the filesystem is shutting down to clean up the inode state. It is
- * responsible for removing the inode item from the AIL if it has not been
- * re-logged and clearing the inode's flush state.
+ * Clear the inode logging fields so no more flushes are attempted. If we are
+ * on a buffer list, it is now safe to remove it because the buffer is
+ * guaranteed to be locked. The caller will drop the reference to the buffer
+ * the log item held.
+ */
+static void
+xfs_iflush_abort_clean(
+ struct xfs_inode_log_item *iip)
+{
+ iip->ili_last_fields = 0;
+ iip->ili_fields = 0;
+ iip->ili_fsync_fields = 0;
+ iip->ili_flush_lsn = 0;
+ iip->ili_item.li_buf = NULL;
+ list_del_init(&iip->ili_item.li_bio_list);
+}
+
+/*
+ * Abort flushing the inode from a context holding the cluster buffer locked.
+ *
+ * This is the normal runtime method of aborting writeback of an inode that is
+ * attached to a cluster buffer. It occurs when the inode and the backing
+ * cluster buffer have been freed (i.e. inode is XFS_ISTALE), or when cluster
+ * flushing or buffer IO completion encounters a log shutdown situation.
+ *
+ * If we need to abort inode writeback and we don't already hold the buffer
+ * locked, call xfs_iflush_shutdown_abort() instead as this should only ever be
+ * necessary in a shutdown situation.
*/
void
xfs_iflush_abort(
struct xfs_inode *ip)
{
struct xfs_inode_log_item *iip = ip->i_itemp;
- struct xfs_buf *bp = NULL;
+ struct xfs_buf *bp;
- if (iip) {
- /*
- * Clear the failed bit before removing the item from the AIL so
- * xfs_trans_ail_delete() doesn't try to clear and release the
- * buffer attached to the log item before we are done with it.
- */
- clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
- xfs_trans_ail_delete(&iip->ili_item, 0);
+ if (!iip) {
+ /* clean inode, nothing to do */
+ xfs_iflags_clear(ip, XFS_IFLUSHING);
+ return;
+ }
+
+ /*
+ * Remove the inode item from the AIL before we clear its internal
+ * state. Whilst the inode is in the AIL, it should have a valid buffer
+ * pointer for push operations to access - it is only safe to remove the
+ * inode from the buffer once it has been removed from the AIL.
+ *
+ * We also clear the failed bit before removing the item from the AIL
+ * as xfs_trans_ail_delete()->xfs_clear_li_failed() will release buffer
+ * references the inode item owns and needs to hold until we've fully
+ * aborted the inode log item and detached it from the buffer.
+ */
+ clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
+ xfs_trans_ail_delete(&iip->ili_item, 0);
+
+ /*
+ * Grab the inode buffer so we can release the reference the inode log
+ * item holds on it.
+ */
+ spin_lock(&iip->ili_lock);
+ bp = iip->ili_item.li_buf;
+ xfs_iflush_abort_clean(iip);
+ spin_unlock(&iip->ili_lock);
+ xfs_iflags_clear(ip, XFS_IFLUSHING);
+ if (bp)
+ xfs_buf_rele(bp);
+}
+
+/*
+ * Abort an inode flush in the case of a shutdown filesystem. This can be called
+ * from anywhere with just an inode reference and does not require holding the
+ * inode cluster buffer locked. If the inode is attached to a cluster buffer,
+ * it will grab and lock it safely, then abort the inode flush.
+ */
+void
+xfs_iflush_shutdown_abort(
+ struct xfs_inode *ip)
+{
+ struct xfs_inode_log_item *iip = ip->i_itemp;
+ struct xfs_buf *bp;
+
+ if (!iip) {
+ /* clean inode, nothing to do */
+ xfs_iflags_clear(ip, XFS_IFLUSHING);
+ return;
+ }
+
+ spin_lock(&iip->ili_lock);
+ bp = iip->ili_item.li_buf;
+ if (!bp) {
+ spin_unlock(&iip->ili_lock);
+ xfs_iflush_abort(ip);
+ return;
+ }
+
+ /*
+ * We have to take a reference to the buffer so that it doesn't get
+ * freed when we drop the ili_lock and then wait to lock the buffer.
+ * We'll clean up the extra reference after we pick up the ili_lock
+ * again.
+ */
+ xfs_buf_hold(bp);
+ spin_unlock(&iip->ili_lock);
+ xfs_buf_lock(bp);
+
+ spin_lock(&iip->ili_lock);
+ if (!iip->ili_item.li_buf) {
/*
- * Clear the inode logging fields so no more flushes are
- * attempted.
+ * Raced with another removal; we now hold the only
+ * reference to bp. The inode should not be in the AIL
+ * now, so just clean up and return.
*/
- spin_lock(&iip->ili_lock);
- iip->ili_last_fields = 0;
- iip->ili_fields = 0;
- iip->ili_fsync_fields = 0;
- iip->ili_flush_lsn = 0;
- bp = iip->ili_item.li_buf;
- iip->ili_item.li_buf = NULL;
- list_del_init(&iip->ili_item.li_bio_list);
+ ASSERT(list_empty(&iip->ili_item.li_bio_list));
+ ASSERT(!test_bit(XFS_LI_IN_AIL, &iip->ili_item.li_flags));
+ xfs_iflush_abort_clean(iip);
spin_unlock(&iip->ili_lock);
+ xfs_iflags_clear(ip, XFS_IFLUSHING);
+ xfs_buf_relse(bp);
+ return;
}
- xfs_iflags_clear(ip, XFS_IFLUSHING);
- if (bp)
- xfs_buf_rele(bp);
+
+ /*
+ * We now hold two references to bp. The first will get dropped by
+ * xfs_iflush_abort() when the item is removed from the buffer list, but
+ * we can't drop our reference until _abort() returns because we have to
+ * unlock the buffer as well. Hence we abort and then unlock and release
+ * our reference to the buffer.
+ */
+ ASSERT(iip->ili_item.li_buf == bp);
+ spin_unlock(&iip->ili_lock);
+ xfs_iflush_abort(ip);
+ xfs_buf_relse(bp);
}
+
/*
* convert an xfs_inode_log_format struct from the old 32 bit version
* (which can have different field alignments) to the native 64 bit version
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 1a302000d604..bbd836a44ff0 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -44,6 +44,7 @@ static inline int xfs_inode_clean(struct xfs_inode *ip)
extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
extern void xfs_inode_item_destroy(struct xfs_inode *);
extern void xfs_iflush_abort(struct xfs_inode *);
+extern void xfs_iflush_shutdown_abort(struct xfs_inode *);
extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
struct xfs_inode_log_format *);
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 09a8fba84ff9..cb9105d667db 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -197,8 +197,6 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
char *data, unsigned int op);
-void xfs_flush_bdev_async(struct bio *bio, struct block_device *bdev,
- struct completion *done);
#define ASSERT_ALWAYS(expr) \
(likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__))
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a8034c0afdf2..499e15b24215 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -487,7 +487,10 @@ out_error:
* Run all the pending iclog callbacks and wake log force waiters and iclog
* space waiters so they can process the newly set shutdown state. We really
* don't care what order we process callbacks here because the log is shut down
- * and so state cannot change on disk anymore.
+ * and so state cannot change on disk anymore. However, we cannot wake waiters
+ * until the callbacks have been processed because we may be in unmount and
+ * we must ensure that all AIL operations the callbacks perform have completed
+ * before we tear down the AIL.
*
* We avoid processing actively referenced iclogs so that we don't run callbacks
 * while the iclog owner might still be preparing the iclog for IO submission.
@@ -501,7 +504,6 @@ xlog_state_shutdown_callbacks(
struct xlog_in_core *iclog;
LIST_HEAD(cb_list);
- spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
do {
if (atomic_read(&iclog->ic_refcnt)) {
@@ -509,26 +511,22 @@ xlog_state_shutdown_callbacks(
continue;
}
list_splice_init(&iclog->ic_callbacks, &cb_list);
+ spin_unlock(&log->l_icloglock);
+
+ xlog_cil_process_committed(&cb_list);
+
+ spin_lock(&log->l_icloglock);
wake_up_all(&iclog->ic_write_wait);
wake_up_all(&iclog->ic_force_wait);
} while ((iclog = iclog->ic_next) != log->l_iclog);
wake_up_all(&log->l_flush_wait);
- spin_unlock(&log->l_icloglock);
-
- xlog_cil_process_committed(&cb_list);
}
/*
 * Flush iclog to disk if this is the last reference to the given iclog and
* it is in the WANT_SYNC state.
*
- * If the caller passes in a non-zero @old_tail_lsn and the current log tail
- * does not match, there may be metadata on disk that must be persisted before
- * this iclog is written. To satisfy that requirement, set the
- * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new
- * log tail value.
- *
* If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
* log tail is updated correctly. NEED_FUA indicates that the iclog will be
* written to stable storage, and implies that a commit record is contained
@@ -545,12 +543,10 @@ xlog_state_shutdown_callbacks(
* always capture the tail lsn on the iclog on the first NEED_FUA release
* regardless of the number of active reference counts on this iclog.
*/
-
int
xlog_state_release_iclog(
struct xlog *log,
- struct xlog_in_core *iclog,
- xfs_lsn_t old_tail_lsn)
+ struct xlog_in_core *iclog)
{
xfs_lsn_t tail_lsn;
bool last_ref;
@@ -561,18 +557,14 @@ xlog_state_release_iclog(
/*
* Grabbing the current log tail needs to be atomic w.r.t. the writing
* of the tail LSN into the iclog so we guarantee that the log tail does
- * not move between deciding if a cache flush is required and writing
- * the LSN into the iclog below.
+ * not move between the first time we know that the iclog needs to be
+ * made stable and when we eventually submit it.
*/
- if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+ if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+ (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
+ !iclog->ic_header.h_tail_lsn) {
tail_lsn = xlog_assign_tail_lsn(log->l_mp);
-
- if (old_tail_lsn && tail_lsn != old_tail_lsn)
- iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
-
- if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
- !iclog->ic_header.h_tail_lsn)
- iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+ iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
}
last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
@@ -583,11 +575,8 @@ xlog_state_release_iclog(
* pending iclog callbacks that were waiting on the release of
* this iclog.
*/
- if (last_ref) {
- spin_unlock(&log->l_icloglock);
+ if (last_ref)
xlog_state_shutdown_callbacks(log);
- spin_lock(&log->l_icloglock);
- }
return -EIO;
}
@@ -600,8 +589,6 @@ xlog_state_release_iclog(
}
iclog->ic_state = XLOG_STATE_SYNCING;
- if (!iclog->ic_header.h_tail_lsn)
- iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
xlog_verify_tail_lsn(log, iclog);
trace_xlog_iclog_syncing(iclog, _RET_IP_);
@@ -873,7 +860,7 @@ xlog_force_iclog(
iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
if (iclog->ic_state == XLOG_STATE_ACTIVE)
xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
- return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
+ return xlog_state_release_iclog(iclog->ic_log, iclog);
}
/*
@@ -1373,7 +1360,7 @@ xlog_ioend_work(
*/
if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
xfs_alert(log->l_mp, "log I/O error %d", error);
- xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
}
xlog_state_done_syncing(iclog);
@@ -1912,7 +1899,7 @@ xlog_write_iclog(
iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
- xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
return;
}
if (is_vmalloc_addr(iclog->ic_data))
@@ -2411,7 +2398,7 @@ xlog_write_copy_finish(
ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
xlog_is_shutdown(log));
release_iclog:
- error = xlog_state_release_iclog(log, iclog, 0);
+ error = xlog_state_release_iclog(log, iclog);
spin_unlock(&log->l_icloglock);
return error;
}
@@ -2487,7 +2474,7 @@ xlog_write(
xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
"ctx ticket reservation ran out. Need to up reservation");
xlog_print_tic_res(log->l_mp, ticket);
- xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
}
len = xlog_write_calc_vec_length(ticket, log_vector, optype);
@@ -2628,7 +2615,7 @@ next_lv:
spin_lock(&log->l_icloglock);
xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
- error = xlog_state_release_iclog(log, iclog, 0);
+ error = xlog_state_release_iclog(log, iclog);
spin_unlock(&log->l_icloglock);
return error;
@@ -3052,7 +3039,7 @@ restart:
* reference to the iclog.
*/
if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
- error = xlog_state_release_iclog(log, iclog, 0);
+ error = xlog_state_release_iclog(log, iclog);
spin_unlock(&log->l_icloglock);
if (error)
return error;
@@ -3821,9 +3808,10 @@ xlog_verify_iclog(
#endif
/*
- * Perform a forced shutdown on the log. This should be called once and once
- * only by the high level filesystem shutdown code to shut the log subsystem
- * down cleanly.
+ * Perform a forced shutdown on the log.
+ *
+ * This can be called from low level log code to trigger a shutdown, or from the
+ * high level mount shutdown code when the mount shuts down.
*
* Our main objectives here are to make sure that:
* a. if the shutdown was not due to a log IO error, flush the logs to
@@ -3832,6 +3820,8 @@ xlog_verify_iclog(
* parties to find out. Nothing new gets queued after this is done.
* c. Tasks sleeping on log reservations, pinned objects and
* other resources get woken up.
+ * d. The mount is also marked as shut down so that log triggered shutdowns
+ * still behave the same as if they called xfs_force_shutdown().
*
* Return true if the shutdown cause was a log IO error and we actually shut the
* log down.
@@ -3843,25 +3833,25 @@ xlog_force_shutdown(
{
bool log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
- /*
- * If this happens during log recovery then we aren't using the runtime
- * log mechanisms yet so there's nothing to shut down.
- */
- if (!log || xlog_in_recovery(log))
+ if (!log)
return false;
- ASSERT(!xlog_is_shutdown(log));
-
/*
* Flush all the completed transactions to disk before marking the log
* being shut down. We need to do this first as shutting down the log
* before the force will prevent the log force from flushing the iclogs
* to disk.
*
- * Re-entry due to a log IO error shutdown during the log force is
- * prevented by the atomicity of higher level shutdown code.
+ * When we are in recovery, there are no transactions to flush, and
+ * we don't want to touch the log because we don't want to perturb the
+ * current head/tail for future recovery attempts. Hence we need to
+ * avoid a log force in this case.
+ *
+ * If we are shutting down due to a log IO error, then we must avoid
+ * trying to write the log as that may just result in more IO errors and
+ * an endless shutdown/force loop.
*/
- if (!log_error)
+ if (!log_error && !xlog_in_recovery(log))
xfs_log_force(log->l_mp, XFS_LOG_SYNC);
/*
@@ -3878,12 +3868,25 @@ xlog_force_shutdown(
spin_lock(&log->l_icloglock);
if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
spin_unlock(&log->l_icloglock);
- ASSERT(0);
return false;
}
spin_unlock(&log->l_icloglock);
/*
+ * If this log shutdown also sets the mount shutdown state, issue a
+ * shutdown warning message.
+ */
+ if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
+ xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
+"Filesystem has been shut down due to log error (0x%x).",
+ shutdown_flags);
+ xfs_alert(log->l_mp,
+"Please unmount the filesystem and rectify the problem(s).");
+ if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
+ xfs_stack_trace();
+ }
+
+ /*
* We don't want anybody waiting for log reservations after this. That
* means we have to wake up everybody queued up on reserveq as well as
* writeq. In addition, we make sure in xlog_{re}grant_log_space that
@@ -3903,8 +3906,12 @@ xlog_force_shutdown(
wake_up_all(&log->l_cilp->xc_start_wait);
wake_up_all(&log->l_cilp->xc_commit_wait);
spin_unlock(&log->l_cilp->xc_push_lock);
+
+ spin_lock(&log->l_icloglock);
xlog_state_shutdown_callbacks(log);
+ spin_unlock(&log->l_icloglock);
+ wake_up_var(&log->l_opstate);
return log_error;
}
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 796e4464f809..ba57323bfdce 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -540,7 +540,7 @@ xlog_cil_insert_items(
spin_unlock(&cil->xc_cil_lock);
if (tp->t_ticket->t_curr_res < 0)
- xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
}
static void
@@ -705,11 +705,21 @@ xlog_cil_set_ctx_write_state(
* The LSN we need to pass to the log items on transaction
* commit is the LSN reported by the first log vector write, not
* the commit lsn. If we use the commit record lsn then we can
- * move the tail beyond the grant write head.
+ * move the grant write head beyond the tail LSN and overwrite
+ * it.
*/
ctx->start_lsn = lsn;
wake_up_all(&cil->xc_start_wait);
spin_unlock(&cil->xc_push_lock);
+
+ /*
+ * Make sure the metadata we are about to overwrite in the log
+ * has been flushed to stable storage before this iclog is
+ * issued.
+ */
+ spin_lock(&cil->xc_log->l_icloglock);
+ iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
+ spin_unlock(&cil->xc_log->l_icloglock);
return;
}
@@ -854,7 +864,7 @@ xlog_cil_write_commit_record(
error = xlog_write(log, ctx, &vec, ctx->ticket, XLOG_COMMIT_TRANS);
if (error)
- xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
return error;
}
@@ -888,10 +898,7 @@ xlog_cil_push_work(
struct xfs_trans_header thdr;
struct xfs_log_iovec lhdr;
struct xfs_log_vec lvhdr = { NULL };
- xfs_lsn_t preflush_tail_lsn;
xfs_csn_t push_seq;
- struct bio bio;
- DECLARE_COMPLETION_ONSTACK(bdev_flush);
bool push_commit_stable;
new_ctx = xlog_cil_ctx_alloc();
@@ -962,23 +969,6 @@ xlog_cil_push_work(
spin_unlock(&cil->xc_push_lock);
/*
- * The CIL is stable at this point - nothing new will be added to it
- * because we hold the flush lock exclusively. Hence we can now issue
- * a cache flush to ensure all the completed metadata in the journal we
- * are about to overwrite is on stable storage.
- *
- * Because we are issuing this cache flush before we've written the
- * tail lsn to the iclog, we can have metadata IO completions move the
- * tail forwards between the completion of this flush and the iclog
- * being written. In this case, we need to re-issue the cache flush
- * before the iclog write. To detect whether the log tail moves, sample
- * the tail LSN *before* we issue the flush.
- */
- preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
- xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
- &bdev_flush);
-
- /*
* Pull all the log vectors off the items in the CIL, and remove the
* items from the CIL. We don't need the CIL lock here because it's only
* needed on the transaction commit side which is currently locked out
@@ -1054,12 +1044,6 @@ xlog_cil_push_work(
lvhdr.lv_iovecp = &lhdr;
lvhdr.lv_next = ctx->lv_chain;
- /*
- * Before we format and submit the first iclog, we have to ensure that
- * the metadata writeback ordering cache flush is complete.
- */
- wait_for_completion(&bdev_flush);
-
error = xlog_cil_write_chain(ctx, &lvhdr);
if (error)
goto out_abort_free_ticket;
@@ -1118,7 +1102,7 @@ xlog_cil_push_work(
if (push_commit_stable &&
ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
- xlog_state_release_iclog(log, ctx->commit_iclog, preflush_tail_lsn);
+ xlog_state_release_iclog(log, ctx->commit_iclog);
/* Not safe to reference ctx now! */
@@ -1139,7 +1123,7 @@ out_abort_free_ticket:
return;
}
spin_lock(&log->l_icloglock);
- xlog_state_release_iclog(log, ctx->commit_iclog, 0);
+ xlog_state_release_iclog(log, ctx->commit_iclog);
/* Not safe to reference ctx now! */
spin_unlock(&log->l_icloglock);
}
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 23103d68423c..401cdc400980 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -484,6 +484,17 @@ xlog_is_shutdown(struct xlog *log)
return test_bit(XLOG_IO_ERROR, &log->l_opstate);
}
+/*
+ * Wait until the xlog_force_shutdown() has marked the log as shut down
+ * so xlog_is_shutdown() will always return true.
+ */
+static inline void
+xlog_shutdown_wait(
+ struct xlog *log)
+{
+ wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
+}
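This pairs with the xfs_do_force_shutdown() change earlier in the series: a racing caller that loses the test_and_set_bit() blocks here until the winner has marked the log shut down, so xlog_is_shutdown() is reliable once it returns:

	if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) {
		xlog_shutdown_wait(mp->m_log);
		return;
	}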
+
/* common routines */
extern int
xlog_recover(
@@ -524,8 +535,7 @@ void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
int eventual_size);
-int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
- xfs_lsn_t log_tail_lsn);
+int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);
/*
* When we crack an atomic LSN, we sample it first so that the value will not
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 96c997ed2ec8..c4ad4296c540 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2485,7 +2485,7 @@ xlog_finish_defer_ops(
error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
if (error) {
- xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
+ xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR);
return error;
}
@@ -2519,21 +2519,22 @@ xlog_abort_defer_ops(
xfs_defer_ops_capture_free(mp, dfc);
}
}
+
/*
* When this is called, all of the log intent items which did not have
- * corresponding log done items should be in the AIL. What we do now
- * is update the data structures associated with each one.
+ * corresponding log done items should be in the AIL. What we do now is update
+ * the data structures associated with each one.
*
- * Since we process the log intent items in normal transactions, they
- * will be removed at some point after the commit. This prevents us
- * from just walking down the list processing each one. We'll use a
- * flag in the intent item to skip those that we've already processed
- * and use the AIL iteration mechanism's generation count to try to
- * speed this up at least a bit.
+ * Since we process the log intent items in normal transactions, they will be
+ * removed at some point after the commit. This prevents us from just walking
+ * down the list processing each one. We'll use a flag in the intent item to
+ * skip those that we've already processed and use the AIL iteration mechanism's
+ * generation count to try to speed this up at least a bit.
*
- * When we start, we know that the intents are the only things in the
- * AIL. As we process them, however, other items are added to the
- * AIL.
+ * When we start, we know that the intents are the only things in the AIL. As we
+ * process them, however, other items are added to the AIL. Hence we know we
+ * have started recovery on all the pending intents when we find a non-intent
+ * item in the AIL.
*/
STATIC int
xlog_recover_process_intents(
@@ -2556,17 +2557,8 @@ xlog_recover_process_intents(
for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
lip != NULL;
lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
- /*
- * We're done when we see something other than an intent.
- * There should be no intents left in the AIL now.
- */
- if (!xlog_item_is_intent(lip)) {
-#ifdef DEBUG
- for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
- ASSERT(!xlog_item_is_intent(lip));
-#endif
+ if (!xlog_item_is_intent(lip))
break;
- }
/*
* We should never see a redo item with a LSN higher than
@@ -2607,8 +2599,9 @@ err:
}
/*
- * A cancel occurs when the mount has failed and we're bailing out.
- * Release all pending log intent items so they don't pin the AIL.
+ * A cancel occurs when the mount has failed and we're bailing out. Release all
+ * pending log intent items that we haven't started recovery on so they don't
+ * pin the AIL.
*/
STATIC void
xlog_recover_cancel_intents(
@@ -2622,17 +2615,8 @@ xlog_recover_cancel_intents(
spin_lock(&ailp->ail_lock);
lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
while (lip != NULL) {
- /*
- * We're done when we see something other than an intent.
- * There should be no intents left in the AIL now.
- */
- if (!xlog_item_is_intent(lip)) {
-#ifdef DEBUG
- for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
- ASSERT(!xlog_item_is_intent(lip));
-#endif
+ if (!xlog_item_is_intent(lip))
break;
- }
spin_unlock(&ailp->ail_lock);
lip->li_ops->iop_release(lip);
@@ -3470,7 +3454,7 @@ xlog_recover_finish(
*/
xlog_recover_cancel_intents(log);
xfs_alert(log->l_mp, "Failed to recover intents");
- xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
return error;
}
@@ -3517,7 +3501,7 @@ xlog_recover_finish(
* end of intents processing can be pushed through the CIL
* and AIL.
*/
- xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
}
return 0;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index bed73e8002a5..c5f153c3693f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -21,6 +21,7 @@
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
+#include "xfs_log_priv.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
@@ -1146,7 +1147,7 @@ xfs_mod_fdblocks(
* problems (i.e. transaction abort, pagecache discards, etc.) than
* slightly premature -ENOSPC.
*/
- set_aside = mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
+ set_aside = xfs_fdblocks_unavailable(mp);
percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
if (__percpu_counter_compare(&mp->m_fdblocks, set_aside,
XFS_FDBLOCKS_BATCH) >= 0) {
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 00720a02e761..f6dc19de8322 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -479,6 +479,21 @@ extern void xfs_unmountfs(xfs_mount_t *);
*/
#define XFS_FDBLOCKS_BATCH 1024
+/*
+ * Estimate the amount of free space that is not available to userspace and is
+ * not explicitly reserved from the incore fdblocks. This includes:
+ *
+ * - The minimum number of blocks needed to support splitting a bmap btree
+ * - The blocks currently in use by the freespace btrees because they record
+ * the actual blocks that will fill per-AG metadata space reservations
+ */
+static inline uint64_t
+xfs_fdblocks_unavailable(
+ struct xfs_mount *mp)
+{
+ return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
+}
+
extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
bool reserved);
extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index d84714e4e46a..54be9d64093e 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -815,7 +815,8 @@ xfs_fs_statfs(
spin_unlock(&mp->m_sb_lock);
/* make sure statp->f_bfree does not underflow */
- statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
+ statp->f_bfree = max_t(int64_t, 0,
+ fdblocks - xfs_fdblocks_unavailable(mp));
statp->f_bavail = statp->f_bfree;
fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 917a69f0a6ff..0ac717aad380 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -836,6 +836,7 @@ __xfs_trans_commit(
bool regrant)
{
struct xfs_mount *mp = tp->t_mountp;
+ struct xlog *log = mp->m_log;
xfs_csn_t commit_seq = 0;
int error = 0;
int sync = tp->t_flags & XFS_TRANS_SYNC;
@@ -864,7 +865,13 @@ __xfs_trans_commit(
if (!(tp->t_flags & XFS_TRANS_DIRTY))
goto out_unreserve;
- if (xfs_is_shutdown(mp)) {
+ /*
+ * We must check against log shutdown here because we cannot abort log
+ * items and leave them dirty, inconsistent and unpinned in memory while
+ * the log is active. This leaves them open to being written back to
+ * disk, and that will lead to on-disk corruption.
+ */
+ if (xlog_is_shutdown(log)) {
error = -EIO;
goto out_unreserve;
}
@@ -878,7 +885,7 @@ __xfs_trans_commit(
xfs_trans_apply_sb_deltas(tp);
xfs_trans_apply_dquot_deltas(tp);
- xlog_cil_commit(mp->m_log, tp, &commit_seq, regrant);
+ xlog_cil_commit(log, tp, &commit_seq, regrant);
xfs_trans_free(tp);
@@ -905,10 +912,10 @@ out_unreserve:
*/
xfs_trans_unreserve_and_mod_dquots(tp);
if (tp->t_ticket) {
- if (regrant && !xlog_is_shutdown(mp->m_log))
- xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
+ if (regrant && !xlog_is_shutdown(log))
+ xfs_log_ticket_regrant(log, tp->t_ticket);
else
- xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
+ xfs_log_ticket_ungrant(log, tp->t_ticket);
tp->t_ticket = NULL;
}
xfs_trans_free_items(tp, !!error);
@@ -926,18 +933,27 @@ xfs_trans_commit(
}
/*
- * Unlock all of the transaction's items and free the transaction.
- * The transaction must not have modified any of its items, because
- * there is no way to restore them to their previous state.
+ * Unlock all of the transaction's items and free the transaction. If the
+ * transaction is dirty, we must shut down the filesystem because there is no
+ * way to restore its items to their previous state.
*
- * If the transaction has made a log reservation, make sure to release
- * it as well.
+ * If the transaction has made a log reservation, make sure to release it as
+ * well.
+ *
+ * This is a high level function (equivalent to xfs_trans_commit()) and so can
+ * be called after the transaction has effectively been aborted due to the mount
+ * being shut down. However, if the mount has not been shut down and the
+ * transaction is dirty we will shut the mount down and, in doing so, that
+ * guarantees that the log is shut down, too. Hence we don't need to be as
+ * careful with shutdown state and dirty items here as we need to be in
+ * xfs_trans_commit().
*/
void
xfs_trans_cancel(
struct xfs_trans *tp)
{
struct xfs_mount *mp = tp->t_mountp;
+ struct xlog *log = mp->m_log;
bool dirty = (tp->t_flags & XFS_TRANS_DIRTY);
trace_xfs_trans_cancel(tp, _RET_IP_);
@@ -955,16 +971,18 @@ xfs_trans_cancel(
}
/*
- * See if the caller is relying on us to shut down the
- * filesystem. This happens in paths where we detect
- * corruption and decide to give up.
+ * See if the caller is relying on us to shut down the filesystem. We
+ * only want an error report if there isn't already a shutdown in
+ * progress, so we only need to check against the mount shutdown state
+ * here.
*/
if (dirty && !xfs_is_shutdown(mp)) {
XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
}
#ifdef DEBUG
- if (!dirty && !xfs_is_shutdown(mp)) {
+ /* Log items need to be consistent until the log is shut down. */
+ if (!dirty && !xlog_is_shutdown(log)) {
struct xfs_log_item *lip;
list_for_each_entry(lip, &tp->t_items, li_trans)
@@ -975,7 +993,7 @@ xfs_trans_cancel(
xfs_trans_unreserve_and_mod_dquots(tp);
if (tp->t_ticket) {
- xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
+ xfs_log_ticket_ungrant(log, tp->t_ticket);
tp->t_ticket = NULL;
}
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index c2ccb98c7bcd..d3a97a028560 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -873,17 +873,17 @@ xfs_trans_ail_delete(
int shutdown_type)
{
struct xfs_ail *ailp = lip->li_ailp;
- struct xfs_mount *mp = ailp->ail_log->l_mp;
+ struct xlog *log = ailp->ail_log;
xfs_lsn_t tail_lsn;
spin_lock(&ailp->ail_lock);
if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
spin_unlock(&ailp->ail_lock);
- if (shutdown_type && !xlog_is_shutdown(ailp->ail_log)) {
- xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
+ if (shutdown_type && !xlog_is_shutdown(log)) {
+ xfs_alert_tag(log->l_mp, XFS_PTAG_AILDELETE,
"%s: attempting to delete a log item that is not in the AIL",
__func__);
- xfs_force_shutdown(mp, shutdown_type);
+ xlog_force_shutdown(log, shutdown_type);
}
return;
}
diff --git a/include/dt-bindings/clock/sun6i-rtc.h b/include/dt-bindings/clock/sun6i-rtc.h
new file mode 100644
index 000000000000..c845493e4d37
--- /dev/null
+++ b/include/dt-bindings/clock/sun6i-rtc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+
+#ifndef _DT_BINDINGS_CLK_SUN6I_RTC_H_
+#define _DT_BINDINGS_CLK_SUN6I_RTC_H_
+
+#define CLK_OSC32K 0
+#define CLK_OSC32K_FANOUT 1
+#define CLK_IOSC 2
+
+#endif /* _DT_BINDINGS_CLK_SUN6I_RTC_H_ */
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index f2ad8ed8f777..652cd05b0924 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -95,7 +95,10 @@ struct blkcg_gq {
spinlock_t async_bio_lock;
struct bio_list async_bios;
- struct work_struct async_bio_work;
+ union {
+ struct work_struct async_bio_work;
+ struct work_struct free_work;
+ };
atomic_t use_delay;
atomic64_t delay_nsec;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index dd0763a1c674..1973ef9bd40f 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -85,8 +85,10 @@ struct block_device {
*/
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
+typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
+typedef u16 blk_short_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP ((__force blk_status_t)1)
diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h
index cf32123b39f5..57c8ec44ab4e 100644
--- a/include/linux/clk/sunxi-ng.h
+++ b/include/linux/clk/sunxi-ng.h
@@ -9,4 +9,6 @@
int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg);
+
#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 183160872133..bbde95387a23 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -275,7 +275,6 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
-#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
* helper code (eg buffer layer)
* to clear GFP_FS from alloc */
@@ -338,28 +337,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
return kiocb->ki_complete == NULL;
}
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
- size_t written;
- size_t count;
- union {
- char __user *buf;
- void *data;
- } arg;
- int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
- unsigned long, unsigned long);
-
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
@@ -370,12 +347,6 @@ struct address_space_operations {
/* Mark a folio dirty. Return true if this dirtied it */
bool (*dirty_folio)(struct address_space *, struct folio *);
- /*
- * Reads in the requested pages. Unlike ->readpage(), this is
- * PURELY used for read-ahead!.
- */
- int (*readpages)(struct file *filp, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages);
void (*readahead)(struct readahead_control *);
int (*write_begin)(struct file *, struct address_space *mapping,
@@ -3027,7 +2998,7 @@ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
+ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index b568b3c7d095..a7afc800bd8d 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -221,7 +221,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
*
* This checks whether ->i_verity_info has been set.
*
- * Filesystems call this from ->readpages() to check whether the pages need to
+ * Filesystems call this from ->readahead() to check whether the pages need to
* be verified or not. Don't use IS_VERITY() for this purpose; it's subject to
* a race condition where the file is being read concurrently with
* FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0fa17fb85de5..761f8f1885c7 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -264,9 +264,7 @@ struct vm_area_struct;
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (24 + \
- 3 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) + \
- IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/**
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index b0728c8ad90c..98c93510640e 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -168,13 +168,16 @@ struct gpio_irq_chip {
/**
* @parent_handler_data:
+ *
+ * If @per_parent_data is false, @parent_handler_data is a single
+ * pointer used as the data associated with every parent interrupt.
+ *
* @parent_handler_data_array:
*
- * Data associated, and passed to, the handler for the parent
- * interrupt. Can either be a single pointer if @per_parent_data
- * is false, or an array of @num_parents pointers otherwise. If
- * @per_parent_data is true, @parent_handler_data_array cannot be
- * NULL.
+ * If @per_parent_data is true, @parent_handler_data_array is
+ * an array of @num_parents pointers, and is used to associate
+ * different data for each parent. This cannot be NULL if
+ * @per_parent_data is true.
*/
union {
void *parent_handler_data;
diff --git a/include/linux/input.h b/include/linux/input.h
index 0354b298d874..49790c1bd2c4 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -475,6 +475,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even
void input_alloc_absinfo(struct input_dev *dev);
void input_set_abs_params(struct input_dev *dev, unsigned int axis,
int min, int max, int fuzz, int flat);
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+ const struct input_dev *src, unsigned int src_axis);
#define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \
static inline int input_abs_get_##_suffix(struct input_dev *dev, \
diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h
new file mode 100644
index 000000000000..7e4b7023bf04
--- /dev/null
+++ b/include/linux/input/vivaldi-fmap.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VIVALDI_FMAP_H
+#define _VIVALDI_FMAP_H
+
+#include <linux/types.h>
+
+#define VIVALDI_MAX_FUNCTION_ROW_KEYS 24
+
+/**
+ * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards
+ * @function_row_physmap: An array of scancodes or their equivalent (HID usage
+ * codes, encoded rows/columns, etc) for the top
+ * row function keys, in order from left to right
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
+ *
+ * This structure is supposed to be used by ChromeOS keyboards using
+ * the Vivaldi keyboard function row design.
+ */
+struct vivaldi_data {
+ u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS];
+ unsigned int num_function_row_keys;
+};
+
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+ char *buf);
+
+#endif /* _VIVALDI_FMAP_H */
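A driver embedding struct vivaldi_data would typically expose the map through a sysfs attribute; a sketch, with my_kbd and its vdata member assumed:

	static ssize_t function_row_physmap_show(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
	{
		const struct my_kbd *kbd = dev_get_drvdata(dev);

		return vivaldi_function_row_physmap_show(&kbd->vdata, buf);
	}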
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9536ffa0473b..3f9b22c4983a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -148,6 +148,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQUEST_MASK GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP BIT(8)
#define KVM_REQUEST_WAIT BIT(9)
+#define KVM_REQUEST_NO_ACTION BIT(10)
/*
* Architecture-independent vcpu->requests bit members
* Bits 4-7 are reserved for more arch-independent bits.
@@ -156,9 +157,18 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK 2
#define KVM_REQ_UNHALT 3
-#define KVM_REQ_GPC_INVALIDATE (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQUEST_ARCH_BASE 8
+/*
+ * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
+ * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
+ * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
+ * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
+ * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
+ * guarantee the vCPU received an IPI and has actually exited guest mode.
+ */
+#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+
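In use this pairs with the common request machinery rather than a direct kick; a sketch of the intended call (the caller is assumed, not part of this hunk):

	/* returns only once every vCPU is observed in OUTSIDE_GUEST_MODE */
	kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);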
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
@@ -1221,27 +1231,27 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
* @gpc: struct gfn_to_pfn_cache object.
* @vcpu: vCPU to be used for marking pages dirty and to be woken on
* invalidation.
- * @guest_uses_pa: indicates that the resulting host physical PFN is used while
- * @vcpu is IN_GUEST_MODE so invalidations should wake it.
- * @kernel_map: requests a kernel virtual mapping (kmap / memremap).
+ * @usage: indicates whether the resulting host physical PFN is used while
+ * the @vcpu is IN_GUEST_MODE (in which case invalidation of
+ * the cache from MMU notifiers, but not from KVM memslot
+ * changes, will also force @vcpu to exit the guest and
+ * refresh the cache) and/or whether the PFN is used directly
+ * by KVM (and thus needs a kernel virtual mapping).
* @gpa: guest physical address to map.
* @len: sanity check; the range being accessed must fit in a single page.
- * @dirty: mark the cache dirty immediately.
*
* @return: 0 for success.
* -EINVAL for a mapping which would cross a page boundary.
* -EFAULT for an untranslatable guest physical address.
*
* This primes a gfn_to_pfn_cache and links it into the @kvm's list for
- * invalidations to be processed. Invalidation callbacks to @vcpu using
- * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM
- * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check()
- * to ensure that the cache is valid before accessing the target page.
+ * invalidations to be processed. Callers are required to use
+ * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
+ * accessing the target page.
*/
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- struct kvm_vcpu *vcpu, bool guest_uses_pa,
- bool kernel_map, gpa_t gpa, unsigned long len,
- bool dirty);
+ struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+ gpa_t gpa, unsigned long len);
/**
* kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
@@ -1250,7 +1260,6 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
* @gpc: struct gfn_to_pfn_cache object.
* @gpa: current guest physical address to map.
* @len: sanity check; the range being accessed must fit in a single page.
- * @dirty: mark the cache dirty immediately.
*
* @return: %true if the cache is still valid and the address matches.
* %false if the cache is not valid.
@@ -1272,7 +1281,6 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
* @gpc: struct gfn_to_pfn_cache object.
* @gpa: updated guest physical address to map.
* @len: sanity check; the range being accessed must fit in a single page.
- * @dirty: mark the cache dirty immediately.
*
* @return: 0 for success.
* -EINVAL for a mapping which would cross a page boundary.
@@ -1285,7 +1293,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
* with the lock still held to permit access.
*/
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- gpa_t gpa, unsigned long len, bool dirty);
+ gpa_t gpa, unsigned long len);
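Putting the three functions together, the documented call pattern under the new signatures looks roughly like this (a sketch; gpa and the error path are illustrative):

	read_lock(&gpc->lock);
	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, PAGE_SIZE)) {
		read_unlock(&gpc->lock);
		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, PAGE_SIZE))
			return;		/* -EINVAL or -EFAULT, see above */
		read_lock(&gpc->lock);
	}
	/* gpc->khva may be dereferenced until read_unlock() */
	read_unlock(&gpc->lock);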
/**
* kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
@@ -1293,10 +1301,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
* @kvm: pointer to kvm instance.
* @gpc: struct gfn_to_pfn_cache object.
*
- * This unmaps the referenced page and marks it dirty, if appropriate. The
- * cache is left in the invalid state but at least the mapping from GPA to
- * userspace HVA will remain cached and can be reused on a subsequent
- * refresh.
+ * This unmaps the referenced page. The cache is left in the invalid state
+ * but at least the mapping from GPA to userspace HVA will remain cached
+ * and can be reused on a subsequent refresh.
*/
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
@@ -1984,7 +1991,7 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
void kvm_arch_irq_routing_update(struct kvm *kvm);
-static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
/*
* Ensure the rest of the request is published to kvm_check_request's
@@ -1994,6 +2001,19 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
+static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+ /*
+ * Requests that don't require vCPU action should never be logged in
+ * vcpu->requests. The vCPU won't clear the request, so it will stay
+ * logged indefinitely and prevent the vCPU from entering the guest.
+ */
+ BUILD_BUG_ON(!__builtin_constant_p(req) ||
+ (req & KVM_REQUEST_NO_ACTION));
+
+ __kvm_make_request(req, vcpu);
+}
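The effect of the BUILD_BUG_ON, sketched as hypothetical callers:

	kvm_make_request(KVM_REQ_OUTSIDE_GUEST_MODE, vcpu);	/* build error: NO_ACTION set */
	kvm_make_request(req, vcpu);				/* build error: not a constant */
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);		/* fine */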
+
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
return READ_ONCE(vcpu->requests);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index dceac12c1ce5..ac1ebb37a0ff 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -18,6 +18,7 @@ struct kvm_memslots;
enum kvm_mr_change;
+#include <linux/bits.h>
#include <linux/types.h>
#include <linux/spinlock_types.h>
@@ -46,6 +47,12 @@ typedef u64 hfn_t;
typedef hfn_t kvm_pfn_t;
+enum pfn_cache_usage {
+ KVM_GUEST_USES_PFN = BIT(0),
+ KVM_HOST_USES_PFN = BIT(1),
+ KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
+};
+
struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
@@ -64,11 +71,9 @@ struct gfn_to_pfn_cache {
rwlock_t lock;
void *khva;
kvm_pfn_t pfn;
+ enum pfn_cache_usage usage;
bool active;
bool valid;
- bool dirty;
- bool kernel_map;
- bool guest_uses_pa;
};
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index 808bb4cee230..b0da04fe087b 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
/* 2 values for divider stage reset, others for "testing purposes only" */
# define RTC_DIV_RESET1 0x60
# define RTC_DIV_RESET2 0x70
+ /* Per the AMD BKDG, bits 5 and 6 are reserved; bit 4 selects the DV0 bank */
+# define RTC_AMD_BANK_SELECT 0x10
/* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
# define RTC_RATE_SELECT 0x0F
diff --git a/include/linux/net.h b/include/linux/net.h
index ba736b457a06..12093f4db50c 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -125,6 +125,25 @@ struct socket {
struct socket_wq wq;
};
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+ size_t written;
+ size_t count;
+ union {
+ char __user *buf;
+ void *data;
+ } arg;
+ int error;
+} read_descriptor_t;
+
struct vm_area_struct;
struct page;
struct sockaddr;
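read_descriptor_t moves here unchanged. For context, consumers in the style of tcp_read_sock() actors drive it roughly like this (my_actor and the copy step are hypothetical):

	static int my_actor(read_descriptor_t *desc, struct sk_buff *skb,
			    unsigned int offset, size_t len)
	{
		size_t want = min_t(size_t, desc->count, len);

		/* copy 'want' bytes from skb at 'offset' into desc->arg.data */
		desc->count -= want;
		desc->written += want;
		return want;	/* bytes consumed; returning 0 stops the loop */
	}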
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 4f44f83817a9..f626a445d1a8 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -346,6 +346,7 @@ enum {
NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
+ NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a8d0b327b066..993994cd943a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -752,8 +752,6 @@ struct page *read_cache_page(struct address_space *, pgoff_t index,
filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-extern int read_cache_pages(struct address_space *mapping,
- struct list_head *pages, filler_t *filler, void *data);
static inline struct page *read_mapping_page(struct address_space *mapping,
pgoff_t index, struct file *file)
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 47fd1c2d3a57..1fd9c6a21ebe 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -110,8 +110,6 @@ struct rtc_device {
struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
int pie_enabled;
struct work_struct irqwork;
- /* Some hardware can't support UIE mode */
- int uie_unsupported;
/*
* This offset specifies the update timing of the RTC.
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
index 67ee9d20cc5a..5a41c3bbcbe3 100644
--- a/include/linux/rtc/ds1685.h
+++ b/include/linux/rtc/ds1685.h
@@ -46,7 +46,6 @@ struct ds1685_priv {
u32 regstep;
int irq_num;
bool bcd_mode;
- bool no_irq;
u8 (*read)(struct ds1685_priv *, int);
void (*write)(struct ds1685_priv *, int, u8);
void (*prepare_poweroff)(void);
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index dffeb8281c2d..8f5a86e210b9 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -174,7 +174,7 @@ static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
static inline void sbitmap_free(struct sbitmap *sb)
{
free_percpu(sb->alloc_hint);
- kfree(sb->map);
+ kvfree(sb->map);
sb->map = NULL;
}
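This pairs with the kvzalloc_node() change in lib/sbitmap.c later in this diff; the general rule, as a sketch:

	/* kvmalloc-family allocations may be vmalloc-backed, so they must
	 * be released with kvfree(), never plain kfree() */
	map = kvzalloc_node(nr * sizeof(*map), GFP_KERNEL, node);
	kvfree(map);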
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 88cc16444b43..60820ab511d2 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -162,6 +162,7 @@ int seq_dentry(struct seq_file *, struct dentry *, const char *);
int seq_path_root(struct seq_file *m, const struct path *path,
const struct path *root, const char *esc);
+void *single_start(struct seq_file *, loff_t *);
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
int single_release(struct inode *, struct file *);
diff --git a/include/uapi/linux/user_events.h b/include/linux/user_events.h
index e570840571e1..e570840571e1 100644
--- a/include/uapi/linux/user_events.h
+++ b/include/linux/user_events.h
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index bb52b786be1b..72feab5ea8d4 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -9,6 +9,7 @@
* See Documentation/core-api/xarray.rst for how to use the XArray.
*/
+#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 314f2779cab5..6b99310b5b88 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -402,6 +402,7 @@ struct snd_pcm_runtime {
struct fasync_struct *fasync;
bool stop_operating; /* sync_stop will be called */
struct mutex buffer_mutex; /* protect for buffer changes */
+ atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */
/* -- private section -- */
void *private_data;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index d2be4eb22008..784adc6f6ed2 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -201,11 +201,9 @@ struct io_uring_cqe {
*
* IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
* IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
- * IORING_CQE_F_MSG If set, CQE was generated with IORING_OP_MSG_RING
*/
#define IORING_CQE_F_BUFFER (1U << 0)
#define IORING_CQE_F_MORE (1U << 1)
-#define IORING_CQE_F_MSG (1U << 2)
enum {
IORING_CQE_BUFFER_SHIFT = 16,
diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
index 24a1c45bd1ae..98e60801195e 100644
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -45,7 +45,7 @@ struct loop_info {
unsigned long lo_inode; /* ioctl r/o */
__kernel_old_dev_t lo_rdevice; /* ioctl r/o */
int lo_offset;
- int lo_encrypt_type;
+ int lo_encrypt_type; /* obsolete, ignored */
int lo_encrypt_key_size; /* ioctl w/o */
int lo_flags;
char lo_name[LO_NAME_SIZE];
@@ -61,7 +61,7 @@ struct loop_info64 {
__u64 lo_offset;
__u64 lo_sizelimit;/* bytes, 0 == max available */
__u32 lo_number; /* ioctl r/o */
- __u32 lo_encrypt_type;
+ __u32 lo_encrypt_type; /* obsolete, ignored */
__u32 lo_encrypt_key_size; /* ioctl w/o */
__u32 lo_flags;
__u8 lo_file_name[LO_NAME_SIZE];
diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h
index 03e5b776e597..97aca4503a6a 100644
--- a/include/uapi/linux/rtc.h
+++ b/include/uapi/linux/rtc.h
@@ -133,7 +133,8 @@ struct rtc_param {
#define RTC_FEATURE_UPDATE_INTERRUPT 4
#define RTC_FEATURE_CORRECTION 5
#define RTC_FEATURE_BACKUP_SWITCH_MODE 6
-#define RTC_FEATURE_CNT 7
+#define RTC_FEATURE_ALARM_WAKEUP_ONLY 7
+#define RTC_FEATURE_CNT 8
/* parameter list */
#define RTC_PARAM_FEATURES 0
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9bb54c0b3b2d..2c43e327a619 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -767,6 +767,7 @@ config USER_EVENTS
bool "User trace events"
select TRACING
select DYNAMIC_EVENTS
+ depends on BROKEN || COMPILE_TEST # API needs to be straightened out
help
User trace events are user-defined trace events that
can be used like an existing kernel trace event. User trace
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 8b3d241a31c2..68d62bfac12f 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -18,7 +18,12 @@
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+/* Reminder to move to uapi when everything works */
+#ifdef CONFIG_COMPILE_TEST
+#include <linux/user_events.h>
+#else
#include <uapi/linux/user_events.h>
+#endif
#include "trace.h"
#include "trace_dynevent.h"
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index 3990e4df3d7b..230038d4f908 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -370,6 +370,7 @@ static void __put_watch_queue(struct kref *kref)
for (i = 0; i < wqueue->nr_pages; i++)
__free_page(wqueue->notes[i]);
+ kfree(wqueue->notes);
bitmap_free(wqueue->notes_bitmap);
wfilter = rcu_access_pointer(wqueue->filter);
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 2eb3de18ded3..ae4fd4de9ebe 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -110,7 +110,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
sb->alloc_hint = NULL;
}
- sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
+ sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
if (!sb->map) {
free_percpu(sb->alloc_hint);
return -ENOMEM;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 8b1c318189ce..e77d4856442c 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1463,6 +1463,25 @@ unlock:
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_create_range_5(struct xarray *xa,
+ unsigned long index, unsigned int order)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ unsigned int i;
+
+ xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+ for (i = 0; i < order + 10; i++) {
+ do {
+ xas_lock(&xas);
+ xas_create_range(&xas);
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+ }
+
+ xa_destroy(xa);
+}
+
static noinline void check_create_range(struct xarray *xa)
{
unsigned int order;
@@ -1490,6 +1509,9 @@ static noinline void check_create_range(struct xarray *xa)
check_create_range_4(xa, (3U << order) + 1, order);
check_create_range_4(xa, (3U << order) - 1, order);
check_create_range_4(xa, (1U << 24) + 1, order);
+
+ check_create_range_5(xa, 0, order);
+ check_create_range_5(xa, (1U << order), order);
}
check_create_range_3();
diff --git a/lib/xarray.c b/lib/xarray.c
index b95e92598b9c..4acc88ea7c21 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -722,6 +722,8 @@ void xas_create_range(struct xa_state *xas)
for (;;) {
struct xa_node *node = xas->xa_node;
+ if (node->shift >= shift)
+ break;
xas->xa_node = xa_parent_locked(xas->xa, node);
xas->xa_offset = node->offset - 1;
if (node->offset != 0)
@@ -1079,6 +1081,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
xa_mk_node(child));
if (xa_is_value(curr))
values--;
+ xas_update(xas, child);
} else {
unsigned int canon = offset - xas->xa_sibs;
@@ -1093,6 +1096,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
} while (offset-- > xas->xa_offset);
node->nr_values += values;
+ xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
#endif
diff --git a/mm/damon/core.c b/mm/damon/core.c
index c1e0fed4e877..5ce8d7c867f0 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1019,12 +1019,15 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
struct damos *s;
unsigned long wait_time;
unsigned long min_wait_time = 0;
+ bool init_wait_time = false;
while (!kdamond_need_stop(ctx)) {
damon_for_each_scheme(s, ctx) {
wait_time = damos_wmark_wait_us(s);
- if (!min_wait_time || wait_time < min_wait_time)
+ if (!init_wait_time || wait_time < min_wait_time) {
+ init_wait_time = true;
min_wait_time = wait_time;
+ }
}
if (!min_wait_time)
return 0;
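A worked two-scheme example shows why the extra flag is needed (values illustrative):

	/*
	 * damos_wmark_wait_us(s1) == 0   (s1 is activated, no wait needed)
	 * damos_wmark_wait_us(s2) == 5s
	 *
	 * Old code: after s1, min_wait_time == 0, which the next
	 * !min_wait_time test read as "unset", so s2 overwrote it with 5s
	 * and kdamond slept although work was ready. With init_wait_time
	 * the legitimate 0 survives and the function returns 0.
	 */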
diff --git a/mm/filemap.c b/mm/filemap.c
index 647d72bf23b6..3a5ffb5587cd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2538,7 +2538,7 @@ static int filemap_create_folio(struct file *file,
* the page cache as the locked folio would then be enough to
* synchronize with hole punching. But there are code paths
* such as filemap_update_page() filling in partially uptodate
- * pages or ->readpages() that need to hold invalidate_lock
+ * pages or ->readahead() that need to hold invalidate_lock
* while mapping blocks for IO so let's hold the lock here as
* well to keep locking rules simple.
*/
@@ -3752,9 +3752,10 @@ out:
}
EXPORT_SYMBOL(generic_file_direct_write);
-ssize_t generic_perform_write(struct file *file,
- struct iov_iter *i, loff_t pos)
+ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
{
+ struct file *file = iocb->ki_filp;
+ loff_t pos = iocb->ki_pos;
struct address_space *mapping = file->f_mapping;
const struct address_space_operations *a_ops = mapping->a_ops;
long status = 0;
@@ -3884,7 +3885,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
goto out;
- status = generic_perform_write(file, from, pos = iocb->ki_pos);
+ pos = iocb->ki_pos;
+ status = generic_perform_write(iocb, from);
/*
* If generic_perform_write() returned a synchronous error
* then we want to return the number of bytes which were
@@ -3916,7 +3918,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
*/
}
} else {
- written = generic_perform_write(file, from, iocb->ki_pos);
+ written = generic_perform_write(iocb, from);
if (likely(written > 0))
iocb->ki_pos += written;
}
diff --git a/mm/gup.c b/mm/gup.c
index 271fbe8195d7..f598a037eb04 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1404,6 +1404,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
unsigned long nr_pages = (end - start) / PAGE_SIZE;
int gup_flags;
+ long ret;
VM_BUG_ON(!PAGE_ALIGNED(start));
VM_BUG_ON(!PAGE_ALIGNED(end));
@@ -1438,8 +1439,10 @@ long populate_vma_page_range(struct vm_area_struct *vma,
* We made sure addr is within a VMA, so the following will
* not result in a stack expansion that recurses back here.
*/
- return __get_user_pages(mm, start, nr_pages, gup_flags,
+ ret = __get_user_pages(mm, start, nr_pages, gup_flags,
NULL, NULL, locked);
+ lru_add_drain();
+ return ret;
}
/*
@@ -1471,6 +1474,7 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
struct mm_struct *mm = vma->vm_mm;
unsigned long nr_pages = (end - start) / PAGE_SIZE;
int gup_flags;
+ long ret;
VM_BUG_ON(!PAGE_ALIGNED(start));
VM_BUG_ON(!PAGE_ALIGNED(end));
@@ -1498,8 +1502,10 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
if (check_vma_flags(vma, gup_flags))
return -EINVAL;
- return __get_user_pages(mm, start, nr_pages, gup_flags,
+ ret = __get_user_pages(mm, start, nr_pages, gup_flags,
NULL, NULL, locked);
+ lru_add_drain();
+ return ret;
}
/*
diff --git a/mm/internal.h b/mm/internal.h
index 58dc6adc19c5..cf16280ce132 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -456,7 +456,8 @@ static inline void munlock_vma_page(struct page *page,
}
void mlock_new_page(struct page *page);
bool need_mlock_page_drain(int cpu);
-void mlock_page_drain(int cpu);
+void mlock_page_drain_local(void);
+void mlock_page_drain_remote(int cpu);
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
@@ -539,7 +540,8 @@ static inline void munlock_vma_page(struct page *page,
struct vm_area_struct *vma, bool compound) { }
static inline void mlock_new_page(struct page *page) { }
static inline bool need_mlock_page_drain(int cpu) { return false; }
-static inline void mlock_page_drain(int cpu) { }
+static inline void mlock_page_drain_local(void) { }
+static inline void mlock_page_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 2f9fdfde1941..a203747ad2c0 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -566,6 +566,8 @@ static unsigned long kfence_init_pool(void)
* enters __slab_free() slow-path.
*/
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+ struct slab *slab = page_slab(&pages[i]);
+
if (!i || (i % 2))
continue;
@@ -573,7 +575,11 @@ static unsigned long kfence_init_pool(void)
if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
return addr;
- __SetPageSlab(&pages[i]);
+ __folio_set_slab(slab_folio(slab));
+#ifdef CONFIG_MEMCG
+ slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
+ MEMCG_DATA_OBJCGS;
+#endif
}
/*
@@ -1033,6 +1039,9 @@ void __kfence_free(void *addr)
{
struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
+#ifdef CONFIG_MEMCG
+ KFENCE_WARN_ON(meta->objcg);
+#endif
/*
* If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
* the object, as the object page may be recycled for other-typed
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index 2a2d5de9d379..9a6c4b1b12a8 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -89,6 +89,9 @@ struct kfence_metadata {
struct kfence_track free_track;
/* For updating alloc_covered on frees. */
u32 alloc_stack_hash;
+#ifdef CONFIG_MEMCG
+ struct obj_cgroup *objcg;
+#endif
};
extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 7580baa76af1..acd7cbb82e16 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -796,6 +796,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
unsigned long flags;
struct kmemleak_object *object;
struct kmemleak_scan_area *area = NULL;
+ unsigned long untagged_ptr;
+ unsigned long untagged_objp;
object = find_and_get_object(ptr, 1);
if (!object) {
@@ -804,6 +806,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
return;
}
+ untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+ untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
+
if (scan_area_cache)
area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
@@ -815,8 +820,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
goto out_unlock;
}
if (size == SIZE_MAX) {
- size = object->pointer + object->size - ptr;
- } else if (ptr + size > object->pointer + object->size) {
+ size = untagged_objp + object->size - untagged_ptr;
+ } else if (untagged_ptr + size > untagged_objp + object->size) {
kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
dump_object_info(object);
kmem_cache_free(scan_area_cache, area);
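With HW tag-based KASAN the two pointers can carry different tags in their top byte, which made the old tagged comparison misfire; an illustrative pair of values (hypothetical):

	/*
	 * ptr             == 0x0b00ffff80001000  (tag 0x0b)
	 * object->pointer == 0x0400ffff80001000  (tag 0x04)
	 *
	 * The untagged ranges coincide, but comparing the tagged values
	 * made ptr + size appear to overflow the object.
	 */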
diff --git a/mm/madvise.c b/mm/madvise.c
index b41858ee937b..1873616a37d2 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1464,16 +1464,9 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
while (iov_iter_count(&iter)) {
iovec = iov_iter_iovec(&iter);
- /*
- * do_madvise returns ENOMEM if unmapped holes are present
- * in the passed VMA. process_madvise() is expected to skip
- * unmapped holes passed to it in the 'struct iovec' list
- * and not fail because of them. Thus treat -ENOMEM return
- * from do_madvise as valid and continue processing.
- */
ret = do_madvise(mm, (unsigned long)iovec.iov_base,
iovec.iov_len, behavior);
- if (ret < 0 && ret != -ENOMEM)
+ if (ret < 0)
break;
iov_iter_advance(&iter, iovec.iov_len);
}
diff --git a/mm/memory.c b/mm/memory.c
index be44d0b36b18..76e3af9639d9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3918,14 +3918,18 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
return ret;
if (unlikely(PageHWPoison(vmf->page))) {
+ struct page *page = vmf->page;
vm_fault_t poisonret = VM_FAULT_HWPOISON;
if (ret & VM_FAULT_LOCKED) {
+ if (page_mapped(page))
+ unmap_mapping_pages(page_mapping(page),
+ page->index, 1, false);
/* Retry if a clean page was removed from the cache. */
- if (invalidate_inode_page(vmf->page))
- poisonret = 0;
- unlock_page(vmf->page);
+ if (invalidate_inode_page(page))
+ poisonret = VM_FAULT_NOPAGE;
+ unlock_page(page);
}
- put_page(vmf->page);
+ put_page(page);
vmf->page = NULL;
return poisonret;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 3d60823afd2d..de175e2fdba5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -246,7 +246,7 @@ static bool remove_migration_pte(struct folio *folio,
set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
}
if (vma->vm_flags & VM_LOCKED)
- mlock_page_drain(smp_processor_id());
+ mlock_page_drain_local();
trace_remove_migration_pte(pvmw.address, pte_val(pte),
compound_order(new));
diff --git a/mm/mlock.c b/mm/mlock.c
index 529fbc1f27c8..716caf851043 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -28,7 +28,14 @@
#include "internal.h"
-static DEFINE_PER_CPU(struct pagevec, mlock_pvec);
+struct mlock_pvec {
+ local_lock_t lock;
+ struct pagevec vec;
+};
+
+static DEFINE_PER_CPU(struct mlock_pvec, mlock_pvec) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};
bool can_do_mlock(void)
{
@@ -203,18 +210,30 @@ static void mlock_pagevec(struct pagevec *pvec)
pagevec_reinit(pvec);
}
-void mlock_page_drain(int cpu)
+void mlock_page_drain_local(void)
+{
+ struct pagevec *pvec;
+
+ local_lock(&mlock_pvec.lock);
+ pvec = this_cpu_ptr(&mlock_pvec.vec);
+ if (pagevec_count(pvec))
+ mlock_pagevec(pvec);
+ local_unlock(&mlock_pvec.lock);
+}
+
+void mlock_page_drain_remote(int cpu)
{
struct pagevec *pvec;
- pvec = &per_cpu(mlock_pvec, cpu);
+ WARN_ON_ONCE(cpu_online(cpu));
+ pvec = &per_cpu(mlock_pvec.vec, cpu);
if (pagevec_count(pvec))
mlock_pagevec(pvec);
}
bool need_mlock_page_drain(int cpu)
{
- return pagevec_count(&per_cpu(mlock_pvec, cpu));
+ return pagevec_count(&per_cpu(mlock_pvec.vec, cpu));
}
/**
@@ -223,7 +242,10 @@ bool need_mlock_page_drain(int cpu)
*/
void mlock_folio(struct folio *folio)
{
- struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+ struct pagevec *pvec;
+
+ local_lock(&mlock_pvec.lock);
+ pvec = this_cpu_ptr(&mlock_pvec.vec);
if (!folio_test_set_mlocked(folio)) {
int nr_pages = folio_nr_pages(folio);
@@ -236,7 +258,7 @@ void mlock_folio(struct folio *folio)
if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
folio_test_large(folio) || lru_cache_disabled())
mlock_pagevec(pvec);
- put_cpu_var(mlock_pvec);
+ local_unlock(&mlock_pvec.lock);
}
/**
@@ -245,9 +267,11 @@ void mlock_folio(struct folio *folio)
*/
void mlock_new_page(struct page *page)
{
- struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+ struct pagevec *pvec;
int nr_pages = thp_nr_pages(page);
+ local_lock(&mlock_pvec.lock);
+ pvec = this_cpu_ptr(&mlock_pvec.vec);
SetPageMlocked(page);
mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
@@ -256,7 +280,7 @@ void mlock_new_page(struct page *page)
if (!pagevec_add(pvec, mlock_new(page)) ||
PageHead(page) || lru_cache_disabled())
mlock_pagevec(pvec);
- put_cpu_var(mlock_pvec);
+ local_unlock(&mlock_pvec.lock);
}
/**
@@ -265,8 +289,10 @@ void mlock_new_page(struct page *page)
*/
void munlock_page(struct page *page)
{
- struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+ struct pagevec *pvec;
+ local_lock(&mlock_pvec.lock);
+ pvec = this_cpu_ptr(&mlock_pvec.vec);
/*
* TestClearPageMlocked(page) must be left to __munlock_page(),
* which will check whether the page is multiply mlocked.
@@ -276,7 +302,7 @@ void munlock_page(struct page *page)
if (!pagevec_add(pvec, page) ||
PageHead(page) || lru_cache_disabled())
mlock_pagevec(pvec);
- put_cpu_var(mlock_pvec);
+ local_unlock(&mlock_pvec.lock);
}
static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6c6af8658775..2db95780e003 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8367,6 +8367,7 @@ static int page_alloc_cpu_dead(unsigned int cpu)
struct zone *zone;
lru_add_drain_cpu(cpu);
+ mlock_page_drain_remote(cpu);
drain_pages(cpu);
/*
diff --git a/mm/readahead.c b/mm/readahead.c
index d3a47546d17d..8e3775829513 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -13,29 +13,29 @@
*
* Readahead is used to read content into the page cache before it is
* explicitly requested by the application. Readahead only ever
- * attempts to read pages that are not yet in the page cache. If a
- * page is present but not up-to-date, readahead will not try to read
+ * attempts to read folios that are not yet in the page cache. If a
+ * folio is present but not up-to-date, readahead will not try to read
* it. In that case a simple ->readpage() will be requested.
*
* Readahead is triggered when an application read request (whether a
- * systemcall or a page fault) finds that the requested page is not in
+ * system call or a page fault) finds that the requested folio is not in
* the page cache, or that it is in the page cache and has the
- * %PG_readahead flag set. This flag indicates that the page was loaded
- * as part of a previous read-ahead request and now that it has been
- * accessed, it is time for the next read-ahead.
+ * readahead flag set. This flag indicates that the folio was read
+ * as part of a previous readahead request and now that it has been
+ * accessed, it is time for the next readahead.
*
* Each readahead request is partly synchronous read, and partly async
- * read-ahead. This is reflected in the struct file_ra_state which
- * contains ->size being to total number of pages, and ->async_size
- * which is the number of pages in the async section. The first page in
- * this async section will have %PG_readahead set as a trigger for a
- * subsequent read ahead. Once a series of sequential reads has been
+ * readahead. This is reflected in the struct file_ra_state which
+ * contains ->size being the total number of pages, and ->async_size
+ * which is the number of pages in the async section. The readahead
+ * flag will be set on the first folio in this async section to trigger
+ * a subsequent readahead. Once a series of sequential reads has been
* established, there should be no need for a synchronous component and
- * all read ahead request will be fully asynchronous.
+ * all readahead requests will be fully asynchronous.
*
- * When either of the triggers causes a readahead, three numbers need to
- * be determined: the start of the region, the size of the region, and
- * the size of the async tail.
+ * When either of the triggers causes a readahead, three numbers need
+ * to be determined: the start of the region to read, the size of the
+ * region, and the size of the async tail.
*
* The start of the region is simply the first page address at or after
* the accessed address, which is not currently populated in the page
@@ -45,14 +45,14 @@
* was explicitly requested from the determined request size, unless
* this would be less than zero - then zero is used. NOTE THIS
* CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
- * PAGE.
+ * PAGE. ALSO THIS CALCULATION IS NOT USED CONSISTENTLY.
*
* The size of the region is normally determined from the size of the
* previous readahead which loaded the preceding pages. This may be
* discovered from the struct file_ra_state for simple sequential reads,
* or from examining the state of the page cache when multiple
* sequential reads are interleaved. Specifically: where the readahead
- * was triggered by the %PG_readahead flag, the size of the previous
+ * was triggered by the readahead flag, the size of the previous
* readahead is assumed to be the number of pages from the triggering
* page to the start of the new readahead. In these cases, the size of
* the previous readahead is scaled, often doubled, for the new
@@ -65,52 +65,52 @@
* larger than the current request, and it is not scaled up, unless it
* is at the start of file.
*
- * In general read ahead is accelerated at the start of the file, as
+ * In general readahead is accelerated at the start of the file, as
* reads from there are often sequential. There are other minor
- * adjustments to the read ahead size in various special cases and these
+ * adjustments to the readahead size in various special cases and these
* are best discovered by reading the code.
*
- * The above calculation determines the readahead, to which any requested
- * read size may be added.
+ * The above calculation, based on the previous readahead size,
+ * determines the size of the readahead, to which any requested read
+ * size may be added.
*
* Readahead requests are sent to the filesystem using the ->readahead()
* address space operation, for which mpage_readahead() is a canonical
* implementation. ->readahead() should normally initiate reads on all
- * pages, but may fail to read any or all pages without causing an IO
+ * folios, but may fail to read any or all folios without causing an I/O
* error. The page cache reading code will issue a ->readpage() request
- * for any page which ->readahead() does not provided, and only an error
+ * for any folio which ->readahead() did not read, and only an error
* from this will be final.
*
- * ->readahead() will generally call readahead_page() repeatedly to get
- * each page from those prepared for read ahead. It may fail to read a
- * page by:
+ * ->readahead() will generally call readahead_folio() repeatedly to get
+ * each folio from those prepared for readahead. It may fail to read a
+ * folio by:
*
- * * not calling readahead_page() sufficiently many times, effectively
- * ignoring some pages, as might be appropriate if the path to
+ * * not calling readahead_folio() sufficiently many times, effectively
+ * ignoring some folios, as might be appropriate if the path to
* storage is congested.
*
- * * failing to actually submit a read request for a given page,
+ * * failing to actually submit a read request for a given folio,
* possibly due to insufficient resources, or
*
* * getting an error during subsequent processing of a request.
*
- * In the last two cases, the page should be unlocked to indicate that
- * the read attempt has failed. In the first case the page will be
- * unlocked by the caller.
+ * In the last two cases, the folio should be unlocked by the filesystem
+ * to indicate that the read attempt has failed. In the first case the
+ * folio will be unlocked by the VFS.
*
- * Those pages not in the final ``async_size`` of the request should be
+ * Those folios not in the final ``async_size`` of the request should be
* considered to be important and ->readahead() should not fail them due
* to congestion or temporary resource unavailability, but should wait
* for necessary resources (e.g. memory or indexing information) to
- * become available. Pages in the final ``async_size`` may be
+ * become available. Folios in the final ``async_size`` may be
* considered less urgent and failure to read them is more acceptable.
- * In this case it is best to use delete_from_page_cache() to remove the
- * pages from the page cache as is automatically done for pages that
- * were not fetched with readahead_page(). This will allow a
- * subsequent synchronous read ahead request to try them again. If they
+ * In this case it is best to use filemap_remove_folio() to remove the
+ * folios from the page cache as is automatically done for folios that
+ * were not fetched with readahead_folio(). This will allow a
+ * subsequent synchronous readahead request to try them again. If they
* are left in the page cache, then they will be read individually using
- * ->readpage().
- *
+ * ->readpage() which may be less efficient.
*/
#include <linux/kernel.h>
@@ -142,91 +142,14 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
-/*
- * see if a page needs releasing upon read_cache_pages() failure
- * - the caller of read_cache_pages() may have set PG_private or PG_fscache
- * before calling, such as the NFS fs marking pages that are cached locally
- * on disk, thus we need to give the fs a chance to clean up in the event of
- * an error
- */
-static void read_cache_pages_invalidate_page(struct address_space *mapping,
- struct page *page)
-{
- if (page_has_private(page)) {
- if (!trylock_page(page))
- BUG();
- page->mapping = mapping;
- folio_invalidate(page_folio(page), 0, PAGE_SIZE);
- page->mapping = NULL;
- unlock_page(page);
- }
- put_page(page);
-}
-
-/*
- * release a list of pages, invalidating them first if need be
- */
-static void read_cache_pages_invalidate_pages(struct address_space *mapping,
- struct list_head *pages)
-{
- struct page *victim;
-
- while (!list_empty(pages)) {
- victim = lru_to_page(pages);
- list_del(&victim->lru);
- read_cache_pages_invalidate_page(mapping, victim);
- }
-}
-
-/**
- * read_cache_pages - populate an address space with some pages & start reads against them
- * @mapping: the address_space
- * @pages: The address of a list_head which contains the target pages. These
- * pages have their ->index populated and are otherwise uninitialised.
- * @filler: callback routine for filling a single page.
- * @data: private data for the callback routine.
- *
- * Hides the details of the LRU cache etc from the filesystems.
- *
- * Returns: %0 on success, error return by @filler otherwise
- */
-int read_cache_pages(struct address_space *mapping, struct list_head *pages,
- int (*filler)(void *, struct page *), void *data)
-{
- struct page *page;
- int ret = 0;
-
- while (!list_empty(pages)) {
- page = lru_to_page(pages);
- list_del(&page->lru);
- if (add_to_page_cache_lru(page, mapping, page->index,
- readahead_gfp_mask(mapping))) {
- read_cache_pages_invalidate_page(mapping, page);
- continue;
- }
- put_page(page);
-
- ret = filler(data, page);
- if (unlikely(ret)) {
- read_cache_pages_invalidate_pages(mapping, pages);
- break;
- }
- task_io_account_read(PAGE_SIZE);
- }
- return ret;
-}
-
-EXPORT_SYMBOL(read_cache_pages);
-
-static void read_pages(struct readahead_control *rac, struct list_head *pages,
- bool skip_page)
+static void read_pages(struct readahead_control *rac)
{
const struct address_space_operations *aops = rac->mapping->a_ops;
struct page *page;
struct blk_plug plug;
if (!readahead_count(rac))
- goto out;
+ return;
blk_start_plug(&plug);
@@ -234,7 +157,7 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
aops->readahead(rac);
/*
* Clean up the remaining pages. The sizes in ->ra
- * maybe be used to size next read-ahead, so make sure
+ * may be used to size the next readahead, so make sure
* they accurately reflect what happened.
*/
while ((page = readahead_page(rac))) {
@@ -246,13 +169,6 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
unlock_page(page);
put_page(page);
}
- } else if (aops->readpages) {
- aops->readpages(rac->file, rac->mapping, pages,
- readahead_count(rac));
- /* Clean up the remaining pages */
- put_pages_list(pages);
- rac->_index += rac->_nr_pages;
- rac->_nr_pages = 0;
} else {
while ((page = readahead_page(rac))) {
aops->readpage(rac->file, page);
@@ -262,12 +178,7 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
blk_finish_plug(&plug);
- BUG_ON(pages && !list_empty(pages));
BUG_ON(readahead_count(rac));
-
-out:
- if (skip_page)
- rac->_index++;
}
/**
@@ -289,7 +200,6 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
{
struct address_space *mapping = ractl->mapping;
unsigned long index = readahead_index(ractl);
- LIST_HEAD(page_pool);
gfp_t gfp_mask = readahead_gfp_mask(mapping);
unsigned long i;
@@ -321,7 +231,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
* have a stable reference to this page, and it's
* not worth getting one just for that.
*/
- read_pages(ractl, &page_pool, true);
+ read_pages(ractl);
+ ractl->_index++;
i = ractl->_index + ractl->_nr_pages - index - 1;
continue;
}
@@ -329,13 +240,11 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
folio = filemap_alloc_folio(gfp_mask, 0);
if (!folio)
break;
- if (mapping->a_ops->readpages) {
- folio->index = index + i;
- list_add(&folio->lru, &page_pool);
- } else if (filemap_add_folio(mapping, folio, index + i,
+ if (filemap_add_folio(mapping, folio, index + i,
gfp_mask) < 0) {
folio_put(folio);
- read_pages(ractl, &page_pool, true);
+ read_pages(ractl);
+ ractl->_index++;
i = ractl->_index + ractl->_nr_pages - index - 1;
continue;
}
@@ -349,7 +258,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
* uptodate then the caller will launch readpage again, and
* will then handle the error.
*/
- read_pages(ractl, &page_pool, false);
+ read_pages(ractl);
filemap_invalidate_unlock_shared(mapping);
memalloc_nofs_restore(nofs);
}
@@ -394,8 +303,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
unsigned long max_pages, index;
- if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
- !mapping->a_ops->readahead))
+ if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead))
return;
/*
@@ -512,7 +420,7 @@ static pgoff_t count_history_pages(struct address_space *mapping,
}
/*
- * page cache context based read-ahead
+ * page cache context based readahead
*/
static int try_context_readahead(struct address_space *mapping,
struct file_ra_state *ra,
@@ -624,7 +532,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
ra->async_size += index - limit - 1;
}
- read_pages(ractl, NULL, false);
+ read_pages(ractl);
/*
* If there were already pages in the page cache, then we may have
@@ -763,9 +671,9 @@ void page_cache_sync_ra(struct readahead_control *ractl,
bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
/*
- * Even if read-ahead is disabled, issue this request as read-ahead
+ * Even if readahead is disabled, issue this request as readahead
* as we'll need it to satisfy the requested range. The forced
- * read-ahead will do the right thing and limit the read to just the
+ * readahead will do the right thing and limit the read to just the
* requested range, which we'll set to 1 page for this case.
*/
if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
@@ -781,7 +689,6 @@ void page_cache_sync_ra(struct readahead_control *ractl,
return;
}
- /* do read-ahead */
ondemand_readahead(ractl, NULL, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);
@@ -789,7 +696,7 @@ EXPORT_SYMBOL_GPL(page_cache_sync_ra);
void page_cache_async_ra(struct readahead_control *ractl,
struct folio *folio, unsigned long req_count)
{
- /* no read-ahead */
+ /* no readahead */
if (!ractl->ra->ra_pages)
return;
@@ -804,7 +711,6 @@ void page_cache_async_ra(struct readahead_control *ractl,
if (blk_cgroup_congested())
return;
- /* do read-ahead */
ondemand_readahead(ractl, folio, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);
diff --git a/mm/rmap.c b/mm/rmap.c
index 5cb970d51f0a..fedb82371efe 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1683,7 +1683,7 @@ discard:
*/
page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
if (vma->vm_flags & VM_LOCKED)
- mlock_page_drain(smp_processor_id());
+ mlock_page_drain_local();
folio_put(folio);
}
@@ -1961,7 +1961,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
*/
page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
if (vma->vm_flags & VM_LOCKED)
- mlock_page_drain(smp_processor_id());
+ mlock_page_drain_local();
folio_put(folio);
}
diff --git a/mm/swap.c b/mm/swap.c
index bceff0cb559c..7e320ec08c6a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -624,7 +624,6 @@ void lru_add_drain_cpu(int cpu)
pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
activate_page_drain(cpu);
- mlock_page_drain(cpu);
}
/**
@@ -706,6 +705,7 @@ void lru_add_drain(void)
local_lock(&lru_pvecs.lock);
lru_add_drain_cpu(smp_processor_id());
local_unlock(&lru_pvecs.lock);
+ mlock_page_drain_local();
}
/*
@@ -720,6 +720,7 @@ static void lru_add_and_bh_lrus_drain(void)
lru_add_drain_cpu(smp_processor_id());
local_unlock(&lru_pvecs.lock);
invalidate_bh_lrus_cpu();
+ mlock_page_drain_local();
}
void lru_add_drain_cpu_zone(struct zone *zone)
@@ -728,6 +729,7 @@ void lru_add_drain_cpu_zone(struct zone *zone)
lru_add_drain_cpu(smp_processor_id());
drain_local_pages(zone);
local_unlock(&lru_pvecs.lock);
+ mlock_page_drain_local();
}
#ifdef CONFIG_SMP
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index edd9849210f2..977d54320a5c 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -970,6 +970,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
runtime->status->state = SNDRV_PCM_STATE_OPEN;
mutex_init(&runtime->buffer_mutex);
+ atomic_set(&runtime->buffer_accessing, 0);
substream->runtime = runtime;
substream->private_data = pcm->private_data;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index a40a35e51fad..1fc7c50ffa62 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1906,11 +1906,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
if (avail >= runtime->twake)
break;
snd_pcm_stream_unlock_irq(substream);
- mutex_unlock(&runtime->buffer_mutex);
tout = schedule_timeout(wait_time);
- mutex_lock(&runtime->buffer_mutex);
snd_pcm_stream_lock_irq(substream);
set_current_state(TASK_INTERRUPTIBLE);
switch (runtime->status->state) {
@@ -2221,7 +2219,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
nonblock = !!(substream->f_flags & O_NONBLOCK);
- mutex_lock(&runtime->buffer_mutex);
snd_pcm_stream_lock_irq(substream);
err = pcm_accessible_state(runtime);
if (err < 0)
@@ -2276,6 +2273,10 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
err = -EINVAL;
goto _end_unlock;
}
+ if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
+ err = -EBUSY;
+ goto _end_unlock;
+ }
snd_pcm_stream_unlock_irq(substream);
if (!is_playback)
snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
@@ -2284,6 +2285,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
if (is_playback)
snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
snd_pcm_stream_lock_irq(substream);
+ atomic_dec(&runtime->buffer_accessing);
if (err < 0)
goto _end_unlock;
err = pcm_accessible_state(runtime);
@@ -2313,7 +2315,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
if (xfer > 0 && err >= 0)
snd_pcm_update_state(substream, runtime);
snd_pcm_stream_unlock_irq(substream);
- mutex_unlock(&runtime->buffer_mutex);
return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
EXPORT_SYMBOL(__snd_pcm_lib_xfer);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 704fdc9ebf91..4adaee62ef33 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -685,6 +685,24 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
return 0;
}
+/* Acquire buffer_mutex; if a r/w operation is in progress, return -EBUSY;
+ * otherwise block further r/w operations.
+ */
+static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
+{
+ if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
+ return -EBUSY;
+ mutex_lock(&runtime->buffer_mutex);
+ return 0; /* keep buffer_mutex, released by snd_pcm_buffer_access_unlock() */
+}
+
+/* release buffer_mutex and clear r/w access flag */
+static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
+{
+ mutex_unlock(&runtime->buffer_mutex);
+ atomic_inc(&runtime->buffer_accessing);
+}
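Together with the atomic_inc_unless_negative() added in __snd_pcm_lib_xfer(), the counter implements a small fail-fast protocol (a summary, not verbatim from the patch):

	/*
	 * buffer_accessing: 0 idle, >0 r/w transfers in flight,
	 *                   <0 a buffer-management operation holds it.
	 *
	 *   transfer path:  atomic_inc_unless_negative() ... atomic_dec()
	 *   mgmt path:      atomic_dec_unless_positive() ... atomic_inc()
	 *
	 * Each side returns -EBUSY instead of waiting on the other, which
	 * lets buffer_mutex be dropped from the transfer path entirely.
	 */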
+
#if IS_ENABLED(CONFIG_SND_PCM_OSS)
#define is_oss_stream(substream) ((substream)->oss.oss)
#else
@@ -695,14 +713,16 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime;
- int err = 0, usecs;
+ int err, usecs;
unsigned int bits;
snd_pcm_uframes_t frames;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
- mutex_lock(&runtime->buffer_mutex);
+ err = snd_pcm_buffer_access_lock(runtime);
+ if (err < 0)
+ return err;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_OPEN:
@@ -820,7 +840,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
snd_pcm_lib_free_pages(substream);
}
unlock:
- mutex_unlock(&runtime->buffer_mutex);
+ snd_pcm_buffer_access_unlock(runtime);
return err;
}
@@ -865,7 +885,9 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
- mutex_lock(&runtime->buffer_mutex);
+ result = snd_pcm_buffer_access_lock(runtime);
+ if (result < 0)
+ return result;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_SETUP:
@@ -884,7 +906,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
unlock:
- mutex_unlock(&runtime->buffer_mutex);
+ snd_pcm_buffer_access_unlock(runtime);
return result;
}
@@ -1369,12 +1391,15 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
/* Guarantee the group members won't change during non-atomic action */
down_read(&snd_pcm_link_rwsem);
- mutex_lock(&substream->runtime->buffer_mutex);
+ res = snd_pcm_buffer_access_lock(substream->runtime);
+ if (res < 0)
+ goto unlock;
if (snd_pcm_stream_linked(substream))
res = snd_pcm_action_group(ops, substream, state, false);
else
res = snd_pcm_action_single(ops, substream, state);
- mutex_unlock(&substream->runtime->buffer_mutex);
+ snd_pcm_buffer_access_unlock(substream->runtime);
+ unlock:
up_read(&snd_pcm_link_rwsem);
return res;
}
diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
index b6bdebd9ef27..10112e1bb25d 100644
--- a/sound/isa/cs423x/cs4236.c
+++ b/sound/isa/cs423x/cs4236.c
@@ -494,7 +494,7 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
static int dev;
int err;
struct snd_card *card;
- struct pnp_dev *cdev;
+ struct pnp_dev *cdev, *iter;
char cid[PNP_ID_LEN];
if (pnp_device_is_isapnp(pdev))
@@ -510,9 +510,11 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
strcpy(cid, pdev->id[0].id);
cid[5] = '1';
cdev = NULL;
- list_for_each_entry(cdev, &(pdev->protocol->devices), protocol_list) {
- if (!strcmp(cdev->id[0].id, cid))
+ list_for_each_entry(iter, &(pdev->protocol->devices), protocol_list) {
+ if (!strcmp(iter->id[0].id, cid)) {
+ cdev = iter;
break;
+ }
}
err = snd_cs423x_card_new(&pdev->dev, dev, &card);
if (err < 0)
diff --git a/sound/pci/hda/patch_cs8409-tables.c b/sound/pci/hda/patch_cs8409-tables.c
index 2d1fa706327b..74c50ec040d9 100644
--- a/sound/pci/hda/patch_cs8409-tables.c
+++ b/sound/pci/hda/patch_cs8409-tables.c
@@ -478,28 +478,29 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0A29, "Bullseye", CS8409_BULLSEYE),
SND_PCI_QUIRK(0x1028, 0x0A2A, "Bullseye", CS8409_BULLSEYE),
SND_PCI_QUIRK(0x1028, 0x0A2B, "Bullseye", CS8409_BULLSEYE),
+ SND_PCI_QUIRK(0x1028, 0x0A77, "Cyborg", CS8409_CYBORG),
+ SND_PCI_QUIRK(0x1028, 0x0A78, "Cyborg", CS8409_CYBORG),
+ SND_PCI_QUIRK(0x1028, 0x0A79, "Cyborg", CS8409_CYBORG),
+ SND_PCI_QUIRK(0x1028, 0x0A7A, "Cyborg", CS8409_CYBORG),
+ SND_PCI_QUIRK(0x1028, 0x0A7D, "Cyborg", CS8409_CYBORG),
+ SND_PCI_QUIRK(0x1028, 0x0A7E, "Cyborg", CS8409_CYBORG),
+ SND_PCI_QUIRK(0x1028, 0x0A7F, "Cyborg", CS8409_CYBORG),
+ SND_PCI_QUIRK(0x1028, 0x0A80, "Cyborg", CS8409_CYBORG),
SND_PCI_QUIRK(0x1028, 0x0AB0, "Warlock", CS8409_WARLOCK),
SND_PCI_QUIRK(0x1028, 0x0AB2, "Warlock", CS8409_WARLOCK),
SND_PCI_QUIRK(0x1028, 0x0AB1, "Warlock", CS8409_WARLOCK),
SND_PCI_QUIRK(0x1028, 0x0AB3, "Warlock", CS8409_WARLOCK),
SND_PCI_QUIRK(0x1028, 0x0AB4, "Warlock", CS8409_WARLOCK),
SND_PCI_QUIRK(0x1028, 0x0AB5, "Warlock", CS8409_WARLOCK),
+ SND_PCI_QUIRK(0x1028, 0x0ACF, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0AD0, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0AD1, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0AD2, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0AD3, "Dolphin", CS8409_DOLPHIN),
SND_PCI_QUIRK(0x1028, 0x0AD9, "Warlock", CS8409_WARLOCK),
SND_PCI_QUIRK(0x1028, 0x0ADA, "Warlock", CS8409_WARLOCK),
SND_PCI_QUIRK(0x1028, 0x0ADB, "Warlock", CS8409_WARLOCK),
SND_PCI_QUIRK(0x1028, 0x0ADC, "Warlock", CS8409_WARLOCK),
- SND_PCI_QUIRK(0x1028, 0x0AF4, "Warlock", CS8409_WARLOCK),
- SND_PCI_QUIRK(0x1028, 0x0AF5, "Warlock", CS8409_WARLOCK),
- SND_PCI_QUIRK(0x1028, 0x0BB5, "Warlock N3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
- SND_PCI_QUIRK(0x1028, 0x0BB6, "Warlock V3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
- SND_PCI_QUIRK(0x1028, 0x0A77, "Cyborg", CS8409_CYBORG),
- SND_PCI_QUIRK(0x1028, 0x0A78, "Cyborg", CS8409_CYBORG),
- SND_PCI_QUIRK(0x1028, 0x0A79, "Cyborg", CS8409_CYBORG),
- SND_PCI_QUIRK(0x1028, 0x0A7A, "Cyborg", CS8409_CYBORG),
- SND_PCI_QUIRK(0x1028, 0x0A7D, "Cyborg", CS8409_CYBORG),
- SND_PCI_QUIRK(0x1028, 0x0A7E, "Cyborg", CS8409_CYBORG),
- SND_PCI_QUIRK(0x1028, 0x0A7F, "Cyborg", CS8409_CYBORG),
- SND_PCI_QUIRK(0x1028, 0x0A80, "Cyborg", CS8409_CYBORG),
SND_PCI_QUIRK(0x1028, 0x0ADF, "Cyborg", CS8409_CYBORG),
SND_PCI_QUIRK(0x1028, 0x0AE0, "Cyborg", CS8409_CYBORG),
SND_PCI_QUIRK(0x1028, 0x0AE1, "Cyborg", CS8409_CYBORG),
@@ -512,11 +513,30 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0AEE, "Cyborg", CS8409_CYBORG),
SND_PCI_QUIRK(0x1028, 0x0AEF, "Cyborg", CS8409_CYBORG),
SND_PCI_QUIRK(0x1028, 0x0AF0, "Cyborg", CS8409_CYBORG),
- SND_PCI_QUIRK(0x1028, 0x0AD0, "Dolphin", CS8409_DOLPHIN),
- SND_PCI_QUIRK(0x1028, 0x0AD1, "Dolphin", CS8409_DOLPHIN),
- SND_PCI_QUIRK(0x1028, 0x0AD2, "Dolphin", CS8409_DOLPHIN),
- SND_PCI_QUIRK(0x1028, 0x0AD3, "Dolphin", CS8409_DOLPHIN),
- SND_PCI_QUIRK(0x1028, 0x0ACF, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0AF4, "Warlock", CS8409_WARLOCK),
+ SND_PCI_QUIRK(0x1028, 0x0AF5, "Warlock", CS8409_WARLOCK),
+ SND_PCI_QUIRK(0x1028, 0x0B92, "Warlock MLK", CS8409_WARLOCK_MLK),
+ SND_PCI_QUIRK(0x1028, 0x0B93, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0B94, "Warlock MLK", CS8409_WARLOCK_MLK),
+ SND_PCI_QUIRK(0x1028, 0x0B95, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0B96, "Warlock MLK", CS8409_WARLOCK_MLK),
+ SND_PCI_QUIRK(0x1028, 0x0B97, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0BB2, "Warlock MLK", CS8409_WARLOCK_MLK),
+ SND_PCI_QUIRK(0x1028, 0x0BB3, "Warlock MLK", CS8409_WARLOCK_MLK),
+ SND_PCI_QUIRK(0x1028, 0x0BB4, "Warlock MLK", CS8409_WARLOCK_MLK),
+ SND_PCI_QUIRK(0x1028, 0x0BB5, "Warlock N3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
+ SND_PCI_QUIRK(0x1028, 0x0BB6, "Warlock V3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
+ SND_PCI_QUIRK(0x1028, 0x0BB8, "Warlock MLK", CS8409_WARLOCK_MLK),
+ SND_PCI_QUIRK(0x1028, 0x0BB9, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0BBA, "Warlock MLK", CS8409_WARLOCK_MLK),
+ SND_PCI_QUIRK(0x1028, 0x0BBB, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0BBC, "Warlock MLK", CS8409_WARLOCK_MLK),
+ SND_PCI_QUIRK(0x1028, 0x0BBD, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0BD4, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0BD5, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0BD6, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0BD7, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0BD8, "Dolphin", CS8409_DOLPHIN),
{} /* terminator */
};
@@ -524,6 +544,8 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
const struct hda_model_fixup cs8409_models[] = {
{ .id = CS8409_BULLSEYE, .name = "bullseye" },
{ .id = CS8409_WARLOCK, .name = "warlock" },
+ { .id = CS8409_WARLOCK_MLK, .name = "warlock mlk" },
+ { .id = CS8409_WARLOCK_MLK_DUAL_MIC, .name = "warlock mlk dual mic" },
{ .id = CS8409_CYBORG, .name = "cyborg" },
{ .id = CS8409_DOLPHIN, .name = "dolphin" },
{}
@@ -542,6 +564,18 @@ const struct hda_fixup cs8409_fixups[] = {
.chained = true,
.chain_id = CS8409_FIXUPS,
},
+ [CS8409_WARLOCK_MLK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = cs8409_cs42l42_pincfgs,
+ .chained = true,
+ .chain_id = CS8409_FIXUPS,
+ },
+ [CS8409_WARLOCK_MLK_DUAL_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = cs8409_cs42l42_pincfgs,
+ .chained = true,
+ .chain_id = CS8409_FIXUPS,
+ },
[CS8409_CYBORG] = {
.type = HDA_FIXUP_PINS,
.v.pins = cs8409_cs42l42_pincfgs,
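
The three tables extended above are consumed together by the HDA core: the PCI SSID is matched against cs8409_fixup_tbl, the "model=" module option against cs8409_models, and the winning entry (plus anything it chains to) is applied out of cs8409_fixups. A minimal sketch of that wiring, assuming the driver's usual probe flow (the function name here is illustrative, not from this patch):

	static int patch_cs8409_sketch(struct hda_codec *codec)
	{
		/* Match the SSID or "model=" string, record codec->fixup_id */
		snd_hda_pick_fixup(codec, cs8409_models, cs8409_fixup_tbl,
				   cs8409_fixups);
		/* Apply the matched entry, following .chained/.chain_id */
		snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
		return 0;
	}

New SSIDs such as the Warlock MLK boards therefore only need table entries; no per-board probe code is required.
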
diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
index aff2b5abb81e..343fabc4387d 100644
--- a/sound/pci/hda/patch_cs8409.c
+++ b/sound/pci/hda/patch_cs8409.c
@@ -733,6 +733,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
{ 0x130A, 0x00 },
{ 0x130F, 0x00 },
};
+ int fsv_old, fsv_new;
/* Bring CS42L42 out of Reset */
gpio_data = snd_hda_codec_read(codec, CS8409_PIN_AFG, 0, AC_VERB_GET_GPIO_DATA, 0);
@@ -749,8 +750,13 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
/* Clear interrupts, by reading interrupt status registers */
cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
- if (cs42l42->full_scale_vol)
- cs8409_i2c_write(cs42l42, 0x2001, 0x01);
+ fsv_old = cs8409_i2c_read(cs42l42, 0x2001);
+ if (cs42l42->full_scale_vol == CS42L42_FULL_SCALE_VOL_0DB)
+ fsv_new = fsv_old & ~CS42L42_FULL_SCALE_VOL_MASK;
+ else
+ fsv_new = fsv_old & CS42L42_FULL_SCALE_VOL_MASK;
+ if (fsv_new != fsv_old)
+ cs8409_i2c_write(cs42l42, 0x2001, fsv_new);
/* we have to explicitly allow unsol event handling even during the
* resume phase so that the jack event is processed properly
@@ -906,9 +912,15 @@ static void cs8409_cs42l42_hw_init(struct hda_codec *codec)
cs8409_vendor_coef_set(codec, seq_bullseye->cir, seq_bullseye->coeff);
}
- /* DMIC1_MO=00b, DMIC1/2_SR=1 */
- if (codec->fixup_id == CS8409_WARLOCK || codec->fixup_id == CS8409_CYBORG)
- cs8409_vendor_coef_set(codec, 0x09, 0x0003);
+ switch (codec->fixup_id) {
+ case CS8409_CYBORG:
+ case CS8409_WARLOCK_MLK_DUAL_MIC:
+ /* DMIC1_MO=00b, DMIC1/2_SR=1 */
+ cs8409_vendor_coef_set(codec, CS8409_DMIC_CFG, 0x0003);
+ break;
+ default:
+ break;
+ }
cs42l42_resume(cs42l42);
@@ -993,25 +1005,17 @@ void cs8409_cs42l42_fixups(struct hda_codec *codec, const struct hda_fixup *fix,
cs8409_fix_caps(codec, CS8409_CS42L42_HP_PIN_NID);
cs8409_fix_caps(codec, CS8409_CS42L42_AMIC_PIN_NID);
- /* Set TIP_SENSE_EN for analog front-end of tip sense.
- * Additionally set HSBIAS_SENSE_EN and Full Scale volume for some variants.
- */
+ /* Set HSBIAS_SENSE_EN and Full Scale volume for some variants. */
switch (codec->fixup_id) {
- case CS8409_WARLOCK:
+ case CS8409_WARLOCK_MLK:
+ case CS8409_WARLOCK_MLK_DUAL_MIC:
spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
- spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
- break;
- case CS8409_BULLSEYE:
- spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
- spec->scodecs[CS8409_CODEC0]->full_scale_vol = 0;
- break;
- case CS8409_CYBORG:
- spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x00a0;
- spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
+ spec->scodecs[CS8409_CODEC0]->full_scale_vol = CS42L42_FULL_SCALE_VOL_0DB;
break;
default:
- spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0003;
- spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
+ spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
+ spec->scodecs[CS8409_CODEC0]->full_scale_vol =
+ CS42L42_FULL_SCALE_VOL_MINUS6DB;
break;
}
@@ -1222,6 +1226,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
cs8409_fix_caps(codec, DOLPHIN_LO_PIN_NID);
cs8409_fix_caps(codec, DOLPHIN_AMIC_PIN_NID);
+ spec->scodecs[CS8409_CODEC0]->full_scale_vol = CS42L42_FULL_SCALE_VOL_MINUS6DB;
+ spec->scodecs[CS8409_CODEC1]->full_scale_vol = CS42L42_FULL_SCALE_VOL_MINUS6DB;
+
break;
case HDA_FIXUP_ACT_PROBE:
/* Fix Sample Rate to 48kHz */
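
The resume hunk above replaces the old unconditional write of 0x01 with a read-modify-write of CS42L42 register 0x2001, keyed off the CS42L42_FULL_SCALE_VOL_* values introduced in the header change below. A worked example of the bit arithmetic, assuming an illustrative readback of 0x06:

	unsigned int fsv_old  = 0x06;	/* example value read from 0x2001 */
	unsigned int fsv_0db  = fsv_old & ~CS42L42_FULL_SCALE_VOL_MASK;	/* 0x04 */
	unsigned int fsv_m6db = fsv_old &  CS42L42_FULL_SCALE_VOL_MASK;	/* 0x02 */

Because the write is skipped when fsv_new == fsv_old, resume avoids a redundant I2C transaction when the codec already holds the wanted setting.
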
diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h
index d0b725c7285b..7df46bd8d2da 100644
--- a/sound/pci/hda/patch_cs8409.h
+++ b/sound/pci/hda/patch_cs8409.h
@@ -235,6 +235,9 @@ enum cs8409_coefficient_index_registers {
#define CS42L42_I2C_SLEEP_US (2000)
#define CS42L42_PDN_TIMEOUT_US (250000)
#define CS42L42_PDN_SLEEP_US (2000)
+#define CS42L42_FULL_SCALE_VOL_MASK (2)
+#define CS42L42_FULL_SCALE_VOL_0DB (1)
+#define CS42L42_FULL_SCALE_VOL_MINUS6DB (0)
/* Dell BULLSEYE / WARLOCK / CYBORG Specific Definitions */
@@ -264,6 +267,8 @@ enum cs8409_coefficient_index_registers {
enum {
CS8409_BULLSEYE,
CS8409_WARLOCK,
+ CS8409_WARLOCK_MLK,
+ CS8409_WARLOCK_MLK_DUAL_MIC,
CS8409_CYBORG,
CS8409_FIXUPS,
CS8409_DOLPHIN,
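
Adding CS8409_WARLOCK_MLK and CS8409_WARLOCK_MLK_DUAL_MIC in the middle of this enum is safe only because cs8409_fixups uses designated initializers, so each fixup lands at its enum index regardless of declaration order:

	[CS8409_WARLOCK_MLK] = { .type = HDA_FIXUP_PINS, /* ... */ },

A positional initializer list here would silently shift every entry after the insertion point.
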
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index c85ed7bc121e..3e086eebf88d 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1625,6 +1625,7 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
struct hda_codec *codec = per_pin->codec;
struct hdmi_spec *spec = codec->spec;
struct hdmi_eld *eld = &spec->temp_eld;
+ struct device *dev = hda_codec_dev(codec);
hda_nid_t pin_nid = per_pin->pin_nid;
int dev_id = per_pin->dev_id;
/*
@@ -1638,8 +1639,13 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
int present;
int ret;
+#ifdef CONFIG_PM
+ if (dev->power.runtime_status == RPM_SUSPENDING)
+ return;
+#endif
+
ret = snd_hda_power_up_pm(codec);
- if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec)))
+ if (ret < 0 && pm_runtime_suspended(dev))
goto out;
present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id);
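
The early return keeps presence sensing from powering the codec back up while it is part-way into runtime suspend. The direct peek at dev->power.runtime_status needs the #ifdef because that field only exists in struct dev_pm_info when CONFIG_PM is set; the same guard could be factored into a helper like this (hypothetical, not part of the patch):

	static bool codec_dev_is_suspending(struct device *dev)
	{
	#ifdef CONFIG_PM
		return dev->power.runtime_status == RPM_SUSPENDING;
	#else
		return false;
	#endif
	}
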
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c78f16944f43..4e12af24b4d3 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3617,8 +3617,8 @@ static void alc256_shutup(struct hda_codec *codec)
/* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
* when booting with headset plugged. So skip setting it for the codec alc257
*/
- if (spec->codec_variant != ALC269_TYPE_ALC257 &&
- spec->codec_variant != ALC269_TYPE_ALC256)
+ if (codec->core.vendor_id != 0x10ec0236 &&
+ codec->core.vendor_id != 0x10ec0257)
alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
if (!spec->no_shutup_pins)
@@ -7006,6 +7006,7 @@ enum {
ALC287_FIXUP_LEGION_16ACHG6,
ALC287_FIXUP_CS35L41_I2C_2,
ALC245_FIXUP_CS35L41_SPI_2,
+ ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED,
ALC245_FIXUP_CS35L41_SPI_4,
ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED,
ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED,
@@ -8771,6 +8772,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = cs35l41_fixup_spi_two,
},
+ [ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs35l41_fixup_spi_two,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_HP_GPIO_LED,
+ },
[ALC245_FIXUP_CS35L41_SPI_4] = {
.type = HDA_FIXUP_FUNC,
.v.func = cs35l41_fixup_spi_four,
@@ -9026,7 +9033,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x89ac, "HP EliteBook 640 G9", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89ae, "HP EliteBook 650 G9", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -11140,6 +11147,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+ SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
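
The new ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED entry works purely through chaining: cs35l41_fixup_spi_two() runs first, then the core follows chain_id to ALC285_FIXUP_HP_GPIO_LED, which is what the Zbook Fury 17 G9 quirk gains over plain ALC245_FIXUP_CS35L41_SPI_2. A simplified sketch of that resolution (the real walk lives in sound/pci/hda/hda_auto_parser.c and also handles HDA_FIXUP_PINS/VERBS and a recursion limit):

	static void apply_chain_sketch(struct hda_codec *codec, int id, int action)
	{
		while (id >= 0) {
			const struct hda_fixup *fix = &alc269_fixups[id];

			if (fix->type == HDA_FIXUP_FUNC && fix->v.func)
				fix->v.func(codec, fix, action);
			id = fix->chained ? fix->chain_id : -1;
		}
	}
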
diff --git a/sound/soc/codecs/mt6358.c b/sound/soc/codecs/mt6358.c
index 9b263a9a669d..4c7b5d940799 100644
--- a/sound/soc/codecs/mt6358.c
+++ b/sound/soc/codecs/mt6358.c
@@ -107,6 +107,7 @@ int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
priv->mtkaif_protocol = mtkaif_protocol;
return 0;
}
+EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_protocol);
static void playback_gpio_set(struct mt6358_priv *priv)
{
@@ -273,6 +274,7 @@ int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt)
1 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT);
return 0;
}
+EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_enable);
int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
{
@@ -296,6 +298,7 @@ int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
capture_gpio_reset(priv);
return 0;
}
+EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_disable);
int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
int phase_1, int phase_2)
@@ -310,6 +313,7 @@ int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
phase_2 << RG_AUD_PAD_TOP_PHASE_MODE2_SFT);
return 0;
}
+EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_calibration_phase);
/* dl pga gain */
enum {
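
The four EXPORT_SYMBOL_GPL() additions let machine drivers built as modules drive the MTKAIF calibration handshake against this codec. A rough sketch of such a caller, with an illustrative phase sweep (the loop bound and the measurement step are placeholders, not taken from any in-tree driver):

	static void mtkaif_calibrate_sketch(struct snd_soc_component *cmpnt)
	{
		int phase;

		mt6358_mtkaif_calibration_enable(cmpnt);
		for (phase = 0; phase <= 42; phase++) {
			mt6358_set_mtkaif_calibration_phase(cmpnt, phase, phase);
			/* ...measure loopback quality, remember best phase... */
		}
		mt6358_mtkaif_calibration_disable(cmpnt);
	}
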
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 370bc790c6ba..d9a0d4768c4d 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -462,11 +462,9 @@ static int hp_jack_event(struct notifier_block *nb, unsigned long event,
if (event & SND_JACK_HEADPHONE)
/* Disable speaker if headphone is plugged in */
- snd_soc_dapm_disable_pin(dapm, "Ext Spk");
+ return snd_soc_dapm_disable_pin(dapm, "Ext Spk");
else
- snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
- return 0;
+ return snd_soc_dapm_enable_pin(dapm, "Ext Spk");
}
static struct notifier_block hp_jack_nb = {
@@ -481,11 +479,9 @@ static int mic_jack_event(struct notifier_block *nb, unsigned long event,
if (event & SND_JACK_MICROPHONE)
/* Disable dmic if microphone is plugged in */
- snd_soc_dapm_disable_pin(dapm, "DMIC");
+ return snd_soc_dapm_disable_pin(dapm, "DMIC");
else
- snd_soc_dapm_enable_pin(dapm, "DMIC");
-
- return 0;
+ return snd_soc_dapm_enable_pin(dapm, "DMIC");
}
static struct notifier_block mic_jack_nb = {
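
The handlers above now propagate the snd_soc_dapm_*_pin() status instead of throwing it away. Note that the pin calls only mark the widget state; the sync that makes the change take effect happens in the caller, roughly like this (simplified from sound/soc/soc-jack.c, an assumption about the call site rather than part of this patch):

	/* in snd_soc_jack_report(), after the pin updates: */
	blocking_notifier_call_chain(&jack->notifier, jack->status, jack);
	snd_soc_dapm_sync(dapm);
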
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
index d3b710406941..98700e75b82a 100644
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
@@ -469,14 +469,14 @@ static int rockchip_i2s_tdm_set_fmt(struct snd_soc_dai *cpu_dai,
txcr_val = I2S_TXCR_IBM_NORMAL;
rxcr_val = I2S_RXCR_IBM_NORMAL;
break;
- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
- txcr_val = I2S_TXCR_TFS_PCM;
- rxcr_val = I2S_RXCR_TFS_PCM;
- break;
- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
+ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 mode */
txcr_val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
rxcr_val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
break;
+ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
+ txcr_val = I2S_TXCR_TFS_PCM;
+ rxcr_val = I2S_RXCR_TFS_PCM;
+ break;
default:
ret = -EINVAL;
goto err_pm_put;
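
The swap brings the register values in line with the ASoC definitions of the two PCM modes: SND_SOC_DAIFMT_DSP_A carries data one bit clock after the frame sync (hence PBM_MODE(1)), while SND_SOC_DAIFMT_DSP_B starts on the frame-sync edge with no delay. An illustrative one-slot timing sketch:

	/*
	 * DSP_A:  LRCK _|~|_______    SD  _.[b0][b1][b2]...  (1 BCLK delay)
	 * DSP_B:  LRCK _|~|_______    SD  _[b0][b1][b2]...   (no delay)
	 */
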
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
index b53f216d4ecc..172419392b33 100644
--- a/sound/soc/sof/intel/Kconfig
+++ b/sound/soc/sof/intel/Kconfig
@@ -84,6 +84,7 @@ if SND_SOC_SOF_PCI
config SND_SOC_SOF_MERRIFIELD
tristate "SOF support for Tangier/Merrifield"
default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_PCI_DEV
select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
help
This adds support for Sound Open Firmware for Intel(R) platforms
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
index 7679335fce5b..7d98e76c2291 100644
--- a/tools/vm/page_owner_sort.c
+++ b/tools/vm/page_owner_sort.c
@@ -441,7 +441,6 @@ static void usage(void)
"-n\t\tSort by task command name.\n"
"-a\t\tSort by memory allocate time.\n"
"-r\t\tSort by memory release time.\n"
- "-c\t\tCull by comparing stacktrace instead of total block.\n"
"-f\t\tFilter out the information of blocks whose memory has been released.\n"
"--pid <PID>\tSelect by pid. This selects the information of blocks whose process ID number equals to <PID>.\n"
"--tgid <TGID>\tSelect by tgid. This selects the information of blocks whose Thread Group ID number equals to <TGID>.\n"
@@ -466,14 +465,11 @@ int main(int argc, char **argv)
{ 0, 0, 0, 0},
};
- while ((opt = getopt_long(argc, argv, "acfmnprstP", longopts, NULL)) != -1)
+ while ((opt = getopt_long(argc, argv, "afmnprstP", longopts, NULL)) != -1)
switch (opt) {
case 'a':
cmp = compare_ts;
break;
- case 'c':
- cull = cull | CULL_STACKTRACE;
- break;
case 'f':
filter = filter | FILTER_UNRELEASE;
break;
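
Removing -c has to touch every place getopt learns about an option: the optstring, the switch, and the usage text above. A minimal pattern for keeping the first two in sync (illustrative only):

	static const char optstring[] = "afmnprstP";	/* one char per case */

	while ((opt = getopt_long(argc, argv, optstring, longopts, NULL)) != -1)
		switch (opt) {
		/* exactly one case per optstring character */
		}
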
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 69c318fdff61..70e05af5ebea 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -117,6 +117,8 @@ EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
static const struct file_operations stat_fops_per_vm;
+static struct file_operations kvm_chardev_ops;
+
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
@@ -251,7 +253,8 @@ static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
{
int cpu;
- kvm_make_request(req, vcpu);
+ if (likely(!(req & KVM_REQUEST_NO_ACTION)))
+ __kvm_make_request(req, vcpu);
if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
return;
@@ -1131,6 +1134,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
preempt_notifier_inc();
kvm_init_pm_notifier(kvm);
+ /*
+ * When the fd passed to this ioctl() is opened it pins the module,
+ * but try_module_get() also prevents getting a reference if the module
+ * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
+ */
+ if (!try_module_get(kvm_chardev_ops.owner)) {
+ r = -ENODEV;
+ goto out_err;
+ }
+
return kvm;
out_err:
@@ -1220,6 +1233,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
preempt_notifier_dec();
hardware_disable_all();
mmdrop(mm);
+ module_put(kvm_chardev_ops.owner);
}
void kvm_get_kvm(struct kvm *kvm)
@@ -3663,7 +3677,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct file_operations kvm_vcpu_fops = {
+static const struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
.mmap = kvm_vcpu_mmap,
@@ -4714,7 +4728,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
}
#endif
-static struct file_operations kvm_vm_fops = {
+static const struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
.llseek = noop_llseek,
@@ -5721,8 +5735,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
goto out_free_5;
kvm_chardev_ops.owner = module;
- kvm_vm_fops.owner = module;
- kvm_vcpu_fops.owner = module;
r = misc_register(&kvm_dev);
if (r) {
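
With kvm_vm_fops and kvm_vcpu_fops now const, their .owner can no longer be patched in at kvm_init() time, so VM lifetime pins the module explicitly instead. The pairing the two hunks establish, written out as hypothetical helpers (names are illustrative):

	static int kvm_vm_pin_module_sketch(void)
	{
		/* Fails if the module is already MODULE_STATE_GOING */
		if (!try_module_get(kvm_chardev_ops.owner))
			return -ENODEV;
		return 0;
	}

	static void kvm_vm_unpin_module_sketch(void)
	{
		module_put(kvm_chardev_ops.owner);	/* kvm_destroy_vm() side */
	}

Every successful kvm_create_vm() therefore holds exactly one module reference until kvm_destroy_vm() drops it.
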
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index ce878f4be4da..dd84676615f1 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -27,7 +27,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
{
DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
struct gfn_to_pfn_cache *gpc;
- bool wake_vcpus = false;
+ bool evict_vcpus = false;
spin_lock(&kvm->gpc_lock);
list_for_each_entry(gpc, &kvm->gpc_list, list) {
@@ -40,41 +40,32 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
/*
* If a guest vCPU could be using the physical address,
- * it needs to be woken.
+ * it needs to be forced out of guest mode.
*/
- if (gpc->guest_uses_pa) {
- if (!wake_vcpus) {
- wake_vcpus = true;
+ if (gpc->usage & KVM_GUEST_USES_PFN) {
+ if (!evict_vcpus) {
+ evict_vcpus = true;
bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
}
__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
}
-
- /*
- * We cannot call mark_page_dirty() from here because
- * this physical CPU might not have an active vCPU
- * with which to do the KVM dirty tracking.
- *
- * Neither is there any point in telling the kernel MM
- * that the underlying page is dirty. A vCPU in guest
- * mode might still be writing to it up to the point
- * where we wake them a few lines further down anyway.
- *
- * So all the dirty marking happens on the unmap.
- */
}
write_unlock_irq(&gpc->lock);
}
spin_unlock(&kvm->gpc_lock);
- if (wake_vcpus) {
- unsigned int req = KVM_REQ_GPC_INVALIDATE;
+ if (evict_vcpus) {
+ /*
+ * KVM needs to ensure the vCPU is fully out of guest context
+ * before allowing the invalidation to continue.
+ */
+ unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
bool called;
/*
* If the OOM reaper is active, then all vCPUs should have
* been stopped already, so perform the request without
- * KVM_REQUEST_WAIT and be sad if any needed to be woken.
+ * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
*/
if (!may_block)
req &= ~KVM_REQUEST_WAIT;
@@ -104,8 +95,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
-static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva,
- gpa_t gpa, bool dirty)
+static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, gpa_t gpa)
{
/* Unmap the old page if it was mapped before, and release it */
if (!is_error_noslot_pfn(pfn)) {
@@ -118,9 +108,7 @@ static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva,
#endif
}
- kvm_release_pfn(pfn, dirty);
- if (dirty)
- mark_page_dirty(kvm, gpa);
+ kvm_release_pfn(pfn, false);
}
}
@@ -152,7 +140,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva)
}
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- gpa_t gpa, unsigned long len, bool dirty)
+ gpa_t gpa, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
unsigned long page_offset = gpa & ~PAGE_MASK;
@@ -160,7 +148,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
unsigned long old_uhva;
gpa_t old_gpa;
void *old_khva;
- bool old_valid, old_dirty;
+ bool old_valid;
int ret = 0;
/*
@@ -177,20 +165,19 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
old_khva = gpc->khva - offset_in_page(gpc->khva);
old_uhva = gpc->uhva;
old_valid = gpc->valid;
- old_dirty = gpc->dirty;
/* If the userspace HVA is invalid, refresh that first */
if (gpc->gpa != gpa || gpc->generation != slots->generation ||
kvm_is_error_hva(gpc->uhva)) {
gfn_t gfn = gpa_to_gfn(gpa);
- gpc->dirty = false;
gpc->gpa = gpa;
gpc->generation = slots->generation;
gpc->memslot = __gfn_to_memslot(slots, gfn);
gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
if (kvm_is_error_hva(gpc->uhva)) {
+ gpc->pfn = KVM_PFN_ERR_FAULT;
ret = -EFAULT;
goto out;
}
@@ -219,7 +206,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
goto map_done;
}
- if (gpc->kernel_map) {
+ if (gpc->usage & KVM_HOST_USES_PFN) {
if (new_pfn == old_pfn) {
new_khva = old_khva;
old_pfn = KVM_PFN_ERR_FAULT;
@@ -255,14 +242,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
}
out:
- if (ret)
- gpc->dirty = false;
- else
- gpc->dirty = dirty;
-
write_unlock_irq(&gpc->lock);
- __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty);
+ __release_gpc(kvm, old_pfn, old_khva, old_gpa);
return ret;
}
@@ -272,7 +254,6 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
void *old_khva;
kvm_pfn_t old_pfn;
- bool old_dirty;
gpa_t old_gpa;
write_lock_irq(&gpc->lock);
@@ -280,7 +261,6 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
gpc->valid = false;
old_khva = gpc->khva - offset_in_page(gpc->khva);
- old_dirty = gpc->dirty;
old_gpa = gpc->gpa;
old_pfn = gpc->pfn;
@@ -293,16 +273,17 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
write_unlock_irq(&gpc->lock);
- __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty);
+ __release_gpc(kvm, old_pfn, old_khva, old_gpa);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- struct kvm_vcpu *vcpu, bool guest_uses_pa,
- bool kernel_map, gpa_t gpa, unsigned long len,
- bool dirty)
+ struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+ gpa_t gpa, unsigned long len)
{
+ WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+
if (!gpc->active) {
rwlock_init(&gpc->lock);
@@ -310,8 +291,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->uhva = KVM_HVA_ERR_BAD;
gpc->vcpu = vcpu;
- gpc->kernel_map = kernel_map;
- gpc->guest_uses_pa = guest_uses_pa;
+ gpc->usage = usage;
gpc->valid = false;
gpc->active = true;
@@ -319,7 +299,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
list_add(&gpc->list, &kvm->gpc_list);
spin_unlock(&kvm->gpc_lock);
}
- return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, dirty);
+ return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init);
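
The old guest_uses_pa/kernel_map bool pair collapses into a single enum pfn_cache_usage bitmask, and the WARN_ON_ONCE() above rejects anything outside KVM_GUEST_AND_HOST_USE_PFN. A sketch of a caller under the new signature (illustrative; a non-NULL vcpu is required whenever KVM_GUEST_USES_PFN is set, since invalidation kicks that vCPU out of guest mode via vcpu_bitmap):

	static int pfncache_init_sketch(struct kvm *kvm, struct kvm_vcpu *vcpu,
					struct gfn_to_pfn_cache *gpc, gpa_t gpa)
	{
		/* Page is touched both by KVM itself and by the guest */
		return kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu,
						 KVM_GUEST_AND_HOST_USE_PFN,
						 gpa, PAGE_SIZE);
	}
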