303 files changed, 10255 insertions, 4164 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d0f6c055dfcc..fb8752b42ec8 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2824,17 +2824,21 @@ nvhe: Standard nVHE-based mode, without support for protected guests. - protected: nVHE-based mode with support for guests whose - state is kept private from the host. + protected: Mode with support for guests whose state is + kept private from the host, using VHE or + nVHE depending on HW support. nested: VHE-based mode with support for nested - virtualization. Requires at least ARMv8.3 - hardware. + virtualization. Requires at least ARMv8.4 + hardware (with FEAT_NV2). Defaults to VHE/nVHE based on hardware support. Setting mode to "protected" will disable kexec and hibernation - for the host. "nested" is experimental and should be - used with extreme caution. + for the host. To force nVHE on VHE hardware, add + "arm64_sw.hvhe=0 id_aa64mmfr1.vh=0" to the + command-line. + "nested" is experimental and should be used with + extreme caution. kvm-arm.vgic_v3_group0_trap= [KVM,ARM,EARLY] Trap guest accesses to GICv3 group-0 diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml index 88871480018e..ab39b95dae40 100644 --- a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml +++ b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml @@ -23,6 +23,8 @@ properties: maxItems: 1 interrupts: + description: + When missing, the device driver uses polling instead. maxItems: 1 clocks: @@ -76,7 +78,6 @@ properties: required: - compatible - reg - - interrupts allOf: - $ref: /schemas/spi/spi-peripheral-props.yaml# diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml index 51d9fb0f4763..8e82999e6acb 100644 --- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml +++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml @@ -83,6 +83,11 @@ properties: - const: renesas,scif-r9a09g057 # RZ/V2H(P) + - items: + - enum: + - renesas,scif-r9a09g047 # RZ/G3E + - const: renesas,scif-r9a09g057 # RZ/V2H fallback + reg: maxItems: 1 diff --git a/Documentation/filesystems/debugfs.rst b/Documentation/filesystems/debugfs.rst index dc35da8b8792..f7f977ffbf8d 100644 --- a/Documentation/filesystems/debugfs.rst +++ b/Documentation/filesystems/debugfs.rst @@ -211,18 +211,16 @@ seq_file content. There are a couple of other directory-oriented helper functions:: - struct dentry *debugfs_rename(struct dentry *old_dir, - struct dentry *old_dentry, - struct dentry *new_dir, - const char *new_name); + int debugfs_change_name(struct dentry *dentry, + const char *fmt, ...); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *target); -A call to debugfs_rename() will give a new name to an existing debugfs -file, possibly in a different directory. The new_name must not exist prior -to the call; the return value is old_dentry with updated information. +A call to debugfs_change_name() will give a new name to an existing debugfs +file, always in the same directory. The new name must not exist prior +to the call; the return value is 0 on success and -E... on failure. Symbolic links can be created with debugfs_create_symlink().
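To make the new helper concrete, here is a minimal sketch (not from the kernel tree; the directory name, attribute, and surrounding functions are invented for the example) of a driver creating a per-device debugfs directory and later renaming it in place with debugfs_change_name()::

    #include <linux/debugfs.h>
    #include <linux/err.h>
    #include <linux/types.h>

    static struct dentry *dir;
    static u32 counter;

    static int example_init(void)
    {
            /* Create "mydev0" holding one read-only u32 attribute. */
            dir = debugfs_create_dir("mydev0", NULL);
            if (IS_ERR(dir))
                    return PTR_ERR(dir);

            debugfs_create_u32("count", 0444, dir, &counter);
            return 0;
    }

    static int example_renumber(int id)
    {
            /*
             * Rename the directory in place; the new name must not
             * already exist. Unlike the old debugfs_rename(), the
             * entry cannot be moved to a different parent directory.
             */
            return debugfs_change_name(dir, "mydev%d", id);
    }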
There is one important thing that all debugfs users must take into account: diff --git a/Documentation/filesystems/fuse-io-uring.rst b/Documentation/filesystems/fuse-io-uring.rst new file mode 100644 index 000000000000..d73dd0dbd238 --- /dev/null +++ b/Documentation/filesystems/fuse-io-uring.rst @@ -0,0 +1,99 @@ +.. SPDX-License-Identifier: GPL-2.0 + +======================================= +FUSE-over-io-uring design documentation +======================================= + +This documentation covers the basic details of how the fuse +kernel/userspace communication through io-uring is configured +and works. For generic details about FUSE see fuse.rst. + +This document also covers the current interface, which is +still in development and might change. + +Limitations +=========== +As of now not all request types are supported through io-uring; userspace +is required to also handle requests through /dev/fuse after io-uring setup +is complete, specifically notifications (initiated from the daemon side) +and interrupts. + +Fuse io-uring configuration +=========================== + +Fuse kernel requests are queued through the classical /dev/fuse +read/write interface until io-uring setup is complete. + +In order to set up fuse-over-io-uring, the fuse server (user-space) +needs to submit SQEs (opcode = IORING_OP_URING_CMD) to the /dev/fuse +connection file descriptor. The initial submission uses the sub-command +FUSE_URING_REQ_REGISTER, which just registers entries to be +available in the kernel. + +Once at least one entry per queue is submitted, the kernel starts +to enqueue to the ring queues. +Note that every CPU core has its own fuse-io-uring queue. +Userspace handles the CQE/fuse-request and submits the result as +sub-command FUSE_URING_REQ_COMMIT_AND_FETCH; the kernel completes +the request and also marks the entry available again. If there are +pending requests waiting, the request will be immediately submitted +to the daemon again. + +Initial SQE -----------:: + + | | FUSE filesystem daemon + | | + | | >io_uring_submit() + | | IORING_OP_URING_CMD / + | | FUSE_URING_CMD_REGISTER + | | [wait cqe] + | | >io_uring_wait_cqe() or + | | >io_uring_submit_and_wait() + | | + | >fuse_uring_cmd() | + | >fuse_uring_register() | + + +Sending requests with CQEs --------------------------:: + + | | FUSE filesystem daemon + | | [waiting for CQEs] + | "rm /mnt/fuse/file" | + | | + | >sys_unlink() | + | >fuse_unlink() | + | [allocate request] | + | >fuse_send_one() | + | ... | + | >fuse_uring_queue_fuse_req | + | [queue request on fg queue] | + | >fuse_uring_add_req_to_ring_ent() | + | ... | + | >fuse_uring_copy_to_ring() | + | >io_uring_cmd_done() | + | >request_wait_answer() | + | [sleep on req->waitq] | + | | [receives and handles CQE] + | | [submit result and fetch next] + | | >io_uring_submit() + | | IORING_OP_URING_CMD/ + | | FUSE_URING_CMD_COMMIT_AND_FETCH + | >fuse_uring_cmd() | + | >fuse_uring_commit_fetch() | + | >fuse_uring_commit() | + | >fuse_uring_copy_from_ring() | + | [ copy the result to the fuse req] | + | >fuse_uring_req_end() | + | >fuse_request_end() | + | [wake up req->waitq] | + | >fuse_uring_next_fuse_req | + | [wait or handle next req] | + | | + | [req->waitq woken up] | + | <fuse_unlink() | + | <sys_unlink() | + + + diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst index 44e9e77ffe0d..2636f2a41bd3 100644 --- a/Documentation/filesystems/index.rst +++ b/Documentation/filesystems/index.rst @@ -98,6 +98,7 @@ Documentation for filesystem implementations.
hpfs fuse fuse-io + fuse-io-uring inotify isofs nilfs2 diff --git a/Documentation/filesystems/nfs/localio.rst b/Documentation/filesystems/nfs/localio.rst index bd1967e2eab3..79808b37d745 100644 --- a/Documentation/filesystems/nfs/localio.rst +++ b/Documentation/filesystems/nfs/localio.rst @@ -218,64 +218,30 @@ NFS Client and Server Interlock =============================== LOCALIO provides the nfs_uuid_t object and associated interfaces to -allow proper network namespace (net-ns) and NFSD object refcounting: - - We don't want to keep a long-term counted reference on each NFSD's - net-ns in the client because that prevents a server container from - completely shutting down. - - So we avoid taking a reference at all and rely on the per-cpu - reference to the server (detailed below) being sufficient to keep - the net-ns active. This involves allowing the NFSD's net-ns exit - code to iterate all active clients and clear their ->net pointers - (which are needed to find the per-cpu-refcount for the nfsd_serv). - - Details: - - - Embed nfs_uuid_t in nfs_client. nfs_uuid_t provides a list_head - that can be used to find the client. It does add the 16-byte - uuid_t to nfs_client so it is bigger than needed (given that - uuid_t is only used during the initial NFS client and server - LOCALIO handshake to determine if they are local to each other). - If that is really a problem we can find a fix. - - - When the nfs server confirms that the uuid_t is local, it moves - the nfs_uuid_t onto a per-net-ns list in NFSD's nfsd_net. - - - When each server's net-ns is shutting down - in a "pre_exit" - handler, all these nfs_uuid_t have their ->net cleared. There is - an rcu_synchronize() call between pre_exit() handlers and exit() - handlers so any caller that sees nfs_uuid_t ->net as not NULL can - safely manage the per-cpu-refcount for nfsd_serv. - - - The client's nfs_uuid_t is passed to nfsd_open_local_fh() so it - can safely dereference ->net in a private rcu_read_lock() section - to allow safe access to the associated nfsd_net and nfsd_serv. - -So LOCALIO required the introduction and use of NFSD's percpu_ref to -interlock nfsd_destroy_serv() and nfsd_open_local_fh(), to ensure each -nn->nfsd_serv is not destroyed while in use by nfsd_open_local_fh(), and +allow proper network namespace (net-ns) and NFSD object refcounting. + +LOCALIO required the introduction and use of NFSD's percpu nfsd_net_ref +to interlock nfsd_shutdown_net() and nfsd_open_local_fh(), to ensure +each net-ns is not destroyed while in use by nfsd_open_local_fh(), and warrants a more detailed explanation: - nfsd_open_local_fh() uses nfsd_serv_try_get() before opening its + nfsd_open_local_fh() uses nfsd_net_try_get() before opening its nfsd_file handle and then the caller (NFS client) must drop the - reference for the nfsd_file and associated nn->nfsd_serv using - nfs_file_put_local() once it has completed its IO. + reference for the nfsd_file and associated net-ns using + nfsd_file_put_local() once it has completed its IO. This interlock working relies heavily on nfsd_open_local_fh() being afforded the ability to safely deal with the possibility that the NFSD's net-ns (and nfsd_net by association) may have been destroyed - by nfsd_destroy_serv() via nfsd_shutdown_net() -- which is only - possible given the nfs_uuid_t ->net pointer managemenet detailed - above. 
- -All told, this elaborate interlock of the NFS client and server has been -verified to fix an easy to hit crash that would occur if an NFSD -instance running in a container, with a LOCALIO client mounted, is -shutdown. Upon restart of the container and associated NFSD the client -would go on to crash due to NULL pointer dereference that occurred due -to the LOCALIO client's attempting to nfsd_open_local_fh(), using -nn->nfsd_serv, without having a proper reference on nn->nfsd_serv. + by nfsd_destroy_serv() via nfsd_shutdown_net(). + +This interlock of the NFS client and server has been verified to fix an +easy-to-hit crash that would occur if an NFSD instance running in a +container, with a LOCALIO client mounted, is shut down. Upon restart of +the container and associated NFSD, the client would go on to crash due +to a NULL pointer dereference caused by the LOCALIO client +attempting nfsd_open_local_fh() without holding a proper reference on +NFSD's net-ns. NFS Client issues IO instead of Server ====================================== @@ -306,10 +272,26 @@ is issuing IO to the underlying local filesystem that it is sharing with the NFS server. See: fs/nfs/localio.c:nfs_local_doio() and fs/nfs/localio.c:nfs_local_commit(). +With normal NFS that makes use of RPC to issue IO to the server, if an +application uses O_DIRECT the NFS client will bypass the pagecache but +the NFS server will not. The NFS server's use of buffered IO allows +applications to be less precise with their alignment when issuing IO to +the NFS client. But if all applications properly align their IO, LOCALIO +can be configured to use end-to-end O_DIRECT semantics from the NFS +client to the underlying local filesystem that it is sharing with +the NFS server, by setting the 'localio_O_DIRECT_semantics' nfs module +parameter to Y, e.g.: + + echo Y > /sys/module/nfs/parameters/localio_O_DIRECT_semantics + +Once enabled, it will cause LOCALIO to use end-to-end O_DIRECT semantics +(but again, this may cause IO to fail if applications do not properly +align their IO). + Security ======== -Localio is only supported when UNIX-style authentication (AUTH_UNIX, aka +LOCALIO is only supported when UNIX-style authentication (AUTH_UNIX, aka AUTH_SYS) is used. Care is taken to ensure the same NFS security mechanisms are used @@ -324,6 +306,24 @@ client is afforded this same level of access (albeit in terms of the NFS protocol via SUNRPC). No other namespaces (user, mount, etc) have been altered or purposely extended from the server to the client. +Module Parameters +================= + +/sys/module/nfs/parameters/localio_enabled (bool) +controls whether LOCALIO is enabled; defaults to Y. If client and server are +local but 'localio_enabled' is set to N, then LOCALIO will not be used. + +/sys/module/nfs/parameters/localio_O_DIRECT_semantics (bool) +controls whether O_DIRECT extends down to the underlying filesystem; defaults +to N. Application IO must be logical-blocksize aligned, otherwise +O_DIRECT will fail. + +/sys/module/nfsv3/parameters/nfs3_localio_probe_throttle (uint) +controls whether NFSv3 read and write IOs will trigger (re)enabling of +LOCALIO every N (nfs3_localio_probe_throttle) IOs; defaults to 0 +(disabled). Must be a power of 2; the admin keeps all the pieces if they +misconfigure it (too low a value, or not a power of 2).
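To make the alignment requirement above concrete, here is a minimal userspace sketch (not part of the kernel documentation; the mount path and the 4096-byte block size are assumptions for the example) of an O_DIRECT read that satisfies the logical-blocksize alignment rule::

    #define _GNU_SOURCE             /* for O_DIRECT */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
            const size_t blksz = 4096;      /* assumed logical block size */
            void *buf;
            int fd;

            /* O_DIRECT requires buffer, offset and length alignment. */
            if (posix_memalign(&buf, blksz, blksz))
                    return 1;

            fd = open("/mnt/nfs/file", O_RDONLY | O_DIRECT);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /*
             * Aligned offset (0) and length (blksz) keep O_DIRECT happy;
             * a misaligned buffer, offset, or length typically fails
             * with EINVAL.
             */
            if (pread(fd, buf, blksz, 0) < 0)
                    perror("pread");

            close(fd);
            free(buf);
            return 0;
    }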
+ Testing ======= diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index bf5aff018c2f..6d1465315df3 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -312,6 +312,7 @@ Code Seq# Include File Comments <mailto:oe@port.de> 'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict! '|' 00-7F linux/media.h +'|' 80-9F samples/ Any sample and example drivers 0x80 00-1F linux/fb.h 0x81 00-1F linux/vduse.h 0x89 00-06 arch/x86/include/asm/sockios.h diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst index 31f14ec4a65b..31a9576c07af 100644 --- a/Documentation/virt/kvm/devices/vcpu.rst +++ b/Documentation/virt/kvm/devices/vcpu.rst @@ -142,8 +142,8 @@ the cpu field to the processor id. :Architectures: ARM64 -2.1. ATTRIBUTES: KVM_ARM_VCPU_TIMER_IRQ_VTIMER, KVM_ARM_VCPU_TIMER_IRQ_PTIMER ------------------------------------------------------------------------------ +2.1. ATTRIBUTES: KVM_ARM_VCPU_TIMER_IRQ_{VTIMER,PTIMER,HVTIMER,HPTIMER} +----------------------------------------------------------------------- :Parameters: in kvm_device_attr.addr the address for the timer interrupt is a pointer to an int @@ -159,10 +159,12 @@ A value describing the architected timer interrupt number when connected to an in-kernel virtual GIC. These must be a PPI (16 <= intid < 32). Setting the attribute overrides the default values (see below). -============================= ========================================== -KVM_ARM_VCPU_TIMER_IRQ_VTIMER The EL1 virtual timer intid (default: 27) -KVM_ARM_VCPU_TIMER_IRQ_PTIMER The EL1 physical timer intid (default: 30) -============================= ========================================== +============================== ========================================== +KVM_ARM_VCPU_TIMER_IRQ_VTIMER The EL1 virtual timer intid (default: 27) +KVM_ARM_VCPU_TIMER_IRQ_PTIMER The EL1 physical timer intid (default: 30) +KVM_ARM_VCPU_TIMER_IRQ_HVTIMER The EL2 virtual timer intid (default: 28) +KVM_ARM_VCPU_TIMER_IRQ_HPTIMER The EL2 physical timer intid (default: 26) +============================== ========================================== Setting the same PPI for different timers will prevent the VCPUs from running. Setting the interrupt number on a VCPU configures all VCPUs created at that diff --git a/MAINTAINERS b/MAINTAINERS index d49306cc17e3..936e80f2c9ce 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5366,6 +5366,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git F: drivers/char/ F: drivers/misc/ F: include/linux/miscdevice.h +F: samples/rust/rust_misc_device.rs X: drivers/char/agp/ X: drivers/char/hw_random/ X: drivers/char/ipmi/ @@ -7091,6 +7092,7 @@ F: include/linux/component.h DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> R: "Rafael J. 
Wysocki" <rafael@kernel.org> +R: Danilo Krummrich <dakr@kernel.org> S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git F: Documentation/core-api/kobject.rst @@ -7101,8 +7103,14 @@ F: include/linux/debugfs.h F: include/linux/fwnode.h F: include/linux/kobj* F: include/linux/property.h +F: include/linux/sysfs.h F: lib/kobj* F: rust/kernel/device.rs +F: rust/kernel/device_id.rs +F: rust/kernel/devres.rs +F: rust/kernel/driver.rs +F: rust/kernel/platform.rs +F: samples/rust/rust_driver_platform.rs DRIVERS FOR OMAP ADAPTIVE VOLTAGE SCALING (AVS) M: Nishanth Menon <nm@ti.com> @@ -17632,6 +17640,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git F: Documentation/ABI/testing/sysfs-firmware-ofw F: drivers/of/ F: include/linux/of*.h +F: rust/kernel/of.rs F: scripts/dtc/ F: tools/testing/selftests/dt/ K: of_overlay_notifier_ @@ -18232,6 +18241,8 @@ F: include/asm-generic/pci* F: include/linux/of_pci.h F: include/linux/pci* F: include/uapi/linux/pci* +F: rust/kernel/pci.rs +F: samples/rust/rust_driver_pci.rs PCIE BANDWIDTH CONTROLLER M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com> @@ -19830,6 +19841,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev F: Documentation/RCU/ F: include/linux/rcu* F: kernel/rcu/ +F: rust/kernel/sync/rcu.rs X: Documentation/RCU/torture.rst X: include/linux/srcu*.h X: kernel/rcu/srcu*.c diff --git a/arch/arm/include/asm/ecard.h b/arch/arm/include/asm/ecard.h index 4befe8d2ae19..7cbe001bf9cc 100644 --- a/arch/arm/include/asm/ecard.h +++ b/arch/arm/include/asm/ecard.h @@ -195,7 +195,7 @@ void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res, unsigned long offset, unsigned long maxsize); #define ecardm_iounmap(__ec, __addr) devm_iounmap(&(__ec)->dev, __addr) -extern struct bus_type ecard_bus_type; +extern const struct bus_type ecard_bus_type; #define ECARD_DEV(_d) container_of((_d), struct expansion_card, dev) diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c index 9f7454b8efa7..2cde4c83b7f9 100644 --- a/arch/arm/mach-rpc/ecard.c +++ b/arch/arm/mach-rpc/ecard.c @@ -1124,7 +1124,7 @@ static int ecard_match(struct device *_dev, const struct device_driver *_drv) return ret; } -struct bus_type ecard_bus_type = { +const struct bus_type ecard_bus_type = { .name = "ecard", .dev_groups = ecard_dev_groups, .match = ecard_match, diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 488f8e751349..6f3f4142e214 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -122,6 +122,7 @@ #define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803 #define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804 #define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805 +#define QCOM_CPU_PART_ORYON_X1 0x001 #define NVIDIA_CPU_PART_DENVER 0x003 #define NVIDIA_CPU_PART_CARMEL 0x004 @@ -198,6 +199,7 @@ #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER) #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD) #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER) +#define MIDR_QCOM_ORYON_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_ORYON_X1) #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) diff --git a/arch/arm64/include/asm/kvm_arm.h 
b/arch/arm64/include/asm/kvm_arm.h index 43e365fbff0b..8d94a6c0ed5c 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -300,7 +300,7 @@ #define CPTR_EL2_TSM (1 << 12) #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) #define CPTR_EL2_TZ (1 << 8) -#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */ +#define CPTR_NVHE_EL2_RES1 (BIT(13) | BIT(9) | GENMASK(7, 0)) #define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \ GENMASK(29, 21) | \ GENMASK(19, 14) | \ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index ca2590344313..bec227f9500a 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -53,8 +53,7 @@ enum __kvm_host_smccc_func { /* Hypercalls available only prior to pKVM finalisation */ /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */ - __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1, - __KVM_HOST_SMCCC_FUNC___pkvm_init, + __KVM_HOST_SMCCC_FUNC___pkvm_init = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1, __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping, __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector, __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs, @@ -65,6 +64,12 @@ enum __kvm_host_smccc_func { /* Hypercalls available after pKVM finalisation */ __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp, __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp, + __KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest, + __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_guest, + __KVM_HOST_SMCCC_FUNC___pkvm_host_relax_perms_guest, + __KVM_HOST_SMCCC_FUNC___pkvm_host_wrprotect_guest, + __KVM_HOST_SMCCC_FUNC___pkvm_host_test_clear_young_guest, + __KVM_HOST_SMCCC_FUNC___pkvm_host_mkyoung_guest, __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc, __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run, __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context, @@ -79,6 +84,9 @@ enum __kvm_host_smccc_func { __KVM_HOST_SMCCC_FUNC___pkvm_init_vm, __KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu, __KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm, + __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load, + __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put, + __KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid, }; #define DECLARE_KVM_VHE_SYM(sym) extern char sym[] @@ -247,8 +255,6 @@ extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu); extern u64 __vgic_v3_get_gic_config(void); extern void __vgic_v3_init_lrs(void); -extern u64 __kvm_get_mdcr_el2(void); - #define __KVM_EXTABLE(from, to) \ " .pushsection __kvm_ex_table, \"a\"\n" \ " .align 3\n" \ diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 4f1d99725f6b..47f2cf408eed 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -184,29 +184,30 @@ static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu) return vcpu_is_el2_ctxt(&vcpu->arch.ctxt); } -static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt) +static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu) { return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) || - (ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H)); + (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_E2H)); } -static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu) +static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu) { - return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt); + return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_TGE; } -static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt) +static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu) { - return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE; -} + 
bool e2h, tge; + u64 hcr; -static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu) -{ - return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt); -} + if (!vcpu_has_nv(vcpu)) + return false; + + hcr = __vcpu_sys_reg(vcpu, HCR_EL2); + + e2h = (hcr & HCR_E2H); + tge = (hcr & HCR_TGE); -static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt) -{ /* * We are in a hypervisor context if the vcpu mode is EL2 or * E2H and TGE bits are set. The latter means we are in the user space @@ -215,14 +216,7 @@ static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt) * Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the * rest of the KVM code, and will result in a misbehaving guest. */ - return vcpu_is_el2_ctxt(ctxt) || - (__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) || - __vcpu_el2_tge_is_set(ctxt); -} - -static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu) -{ - return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt); + return vcpu_is_el2(vcpu) || (e2h && tge) || tge; } static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu) @@ -619,7 +613,8 @@ static __always_inline void kvm_write_cptr_el2(u64 val) write_sysreg(val, cptr_el2); } -static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) +/* Resets the value of cptr_el2 when returning to the host. */ +static __always_inline void __kvm_reset_cptr_el2(struct kvm *kvm) { u64 val; @@ -630,29 +625,28 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) } else if (has_hvhe()) { val = CPACR_EL1_FPEN; - if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs()) + if (!kvm_has_sve(kvm) || !guest_owns_fp_regs()) val |= CPACR_EL1_ZEN; if (cpus_have_final_cap(ARM64_SME)) val |= CPACR_EL1_SMEN; } else { val = CPTR_NVHE_EL2_RES1; - if (vcpu_has_sve(vcpu) && guest_owns_fp_regs()) + if (kvm_has_sve(kvm) && guest_owns_fp_regs()) val |= CPTR_EL2_TZ; - if (cpus_have_final_cap(ARM64_SME)) - val &= ~CPTR_EL2_TSM; + if (!cpus_have_final_cap(ARM64_SME)) + val |= CPTR_EL2_TSM; } - return val; -} - -static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu) -{ - u64 val = kvm_get_reset_cptr_el2(vcpu); - kvm_write_cptr_el2(val); } +#ifdef __KVM_NVHE_HYPERVISOR__ +#define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2(kern_hyp_va((v)->kvm)) +#else +#define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2((v)->kvm) +#endif + /* * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE * format if E2H isn't set. 
@@ -697,9 +691,4 @@ static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu) { return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN); } - -static inline void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu) -{ - vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH); -} #endif /* __ARM64_KVM_EMULATE_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e18e9244d17a..7cfa024de4e3 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -85,6 +85,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu); struct kvm_hyp_memcache { phys_addr_t head; unsigned long nr_pages; + struct pkvm_mapping *mapping; /* only used from EL1 */ }; static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc, @@ -331,6 +332,8 @@ struct kvm_arch { #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7 /* Fine-Grained UNDEF initialised */ #define KVM_ARCH_FLAG_FGU_INITIALIZED 8 + /* SVE exposed to guest */ +#define KVM_ARCH_FLAG_GUEST_HAS_SVE 9 unsigned long flags; /* VM-wide vCPU feature set */ @@ -490,7 +493,6 @@ enum vcpu_sysreg { VBAR_EL2, /* Vector Base Address Register (EL2) */ RVBAR_EL2, /* Reset Vector Base Address Register */ CONTEXTIDR_EL2, /* Context ID Register (EL2) */ - CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */ SP_EL2, /* EL2 Stack Pointer */ CNTHP_CTL_EL2, CNTHP_CVAL_EL2, @@ -501,6 +503,7 @@ enum vcpu_sysreg { MARKER(__SANITISED_REG_START__), TCR2_EL2, /* Extended Translation Control Register (EL2) */ MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */ + CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */ /* Any VNCR-capable reg goes after this point */ MARKER(__VNCR_START__), @@ -610,6 +613,14 @@ struct cpu_sve_state { * field. */ struct kvm_host_data { +#define KVM_HOST_DATA_FLAG_HAS_SPE 0 +#define KVM_HOST_DATA_FLAG_HAS_TRBE 1 +#define KVM_HOST_DATA_FLAG_HOST_SVE_ENABLED 2 +#define KVM_HOST_DATA_FLAG_HOST_SME_ENABLED 3 +#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4 +#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5 + unsigned long flags; + struct kvm_cpu_context host_ctxt; /* @@ -642,7 +653,7 @@ struct kvm_host_data { * host_debug_state contains the host registers which are * saved and restored during world switches. */ - struct { + struct { /* {Break,watch}point registers */ struct kvm_guest_debug_arch regs; /* Statistical profiling extension */ @@ -652,6 +663,16 @@ struct kvm_host_data { /* Values of trap registers for the host before guest entry. */ u64 mdcr_el2; } host_debug_state; + + /* Guest trace filter value */ + u64 trfcr_while_in_guest; + + /* Number of programmable event counters (PMCR_EL0.N) for this CPU */ + unsigned int nr_event_counters; + + /* Number of debug breakpoints/watchpoints for this CPU (minus 1) */ + unsigned int debug_brps; + unsigned int debug_wrps; }; struct kvm_host_psci_config { @@ -708,7 +729,6 @@ struct kvm_vcpu_arch { u64 hcr_el2; u64 hcrx_el2; u64 mdcr_el2; - u64 cptr_el2; /* Exception Information */ struct kvm_vcpu_fault_info fault; @@ -739,31 +759,22 @@ struct kvm_vcpu_arch { * * external_debug_state contains the debug values we want to debug the * guest. This is set via the KVM_SET_GUEST_DEBUG ioctl. - * - * debug_ptr points to the set of debug registers that should be loaded - * onto the hardware when running the guest. 
*/ - struct kvm_guest_debug_arch *debug_ptr; struct kvm_guest_debug_arch vcpu_debug_state; struct kvm_guest_debug_arch external_debug_state; + u64 external_mdscr_el1; + + enum { + VCPU_DEBUG_FREE, + VCPU_DEBUG_HOST_OWNED, + VCPU_DEBUG_GUEST_OWNED, + } debug_owner; /* VGIC state */ struct vgic_cpu vgic_cpu; struct arch_timer_cpu timer_cpu; struct kvm_pmu pmu; - /* - * Guest registers we preserve during guest debugging. - * - * These shadow registers are updated by the kvm_handle_sys_reg - * trap handler if the guest accesses or updates them while we - * are using guest debug. - */ - struct { - u32 mdscr_el1; - bool pstate_ss; - } guest_debug_preserved; - /* vcpu power state */ struct kvm_mp_state mp_state; spinlock_t mp_state_lock; @@ -771,6 +782,9 @@ struct kvm_vcpu_arch { /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; + /* Pages to top-up the pKVM/EL2 guest pool */ + struct kvm_hyp_memcache pkvm_memcache; + /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ u64 vsesr_el2; @@ -863,14 +877,10 @@ struct kvm_vcpu_arch { #define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__) #define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__) -/* SVE exposed to guest */ -#define GUEST_HAS_SVE __vcpu_single_flag(cflags, BIT(0)) +/* KVM_ARM_VCPU_INIT completed */ +#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0)) /* SVE config completed */ #define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1)) -/* PTRAUTH exposed to guest */ -#define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2)) -/* KVM_ARM_VCPU_INIT completed */ -#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(3)) /* Exception pending */ #define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0)) @@ -906,29 +916,21 @@ struct kvm_vcpu_arch { #define EXCEPT_AA64_EL2_IRQ __vcpu_except_flags(5) #define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6) #define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7) -/* Guest debug is live */ -#define DEBUG_DIRTY __vcpu_single_flag(iflags, BIT(4)) -/* Save SPE context if active */ -#define DEBUG_STATE_SAVE_SPE __vcpu_single_flag(iflags, BIT(5)) -/* Save TRBE context if active */ -#define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6)) - -/* SVE enabled for host EL0 */ -#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0)) -/* SME enabled for EL0 */ -#define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1)) + /* Physical CPU not in supported_cpus */ -#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2)) +#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(0)) /* WFIT instruction trapped */ -#define IN_WFIT __vcpu_single_flag(sflags, BIT(3)) +#define IN_WFIT __vcpu_single_flag(sflags, BIT(1)) /* vcpu system registers loaded on physical CPU */ -#define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4)) -/* Software step state is Active-pending */ -#define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5)) +#define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(2)) +/* Software step state is Active-pending for external debug */ +#define HOST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(3)) +/* Software step state is Active pending for guest debug */ +#define GUEST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(4)) /* PMUSERENR for the guest EL0 is on physical CPU */ -#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(6)) +#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(5)) /* WFI instruction trapped */ -#define IN_WFI __vcpu_single_flag(sflags, BIT(7)) +#define 
IN_WFI __vcpu_single_flag(sflags, BIT(6)) /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ @@ -959,14 +961,21 @@ struct kvm_vcpu_arch { KVM_GUESTDBG_USE_HW | \ KVM_GUESTDBG_SINGLESTEP) -#define vcpu_has_sve(vcpu) (system_supports_sve() && \ - vcpu_get_flag(vcpu, GUEST_HAS_SVE)) +#define kvm_has_sve(kvm) (system_supports_sve() && \ + test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags)) + +#ifdef __KVM_NVHE_HYPERVISOR__ +#define vcpu_has_sve(vcpu) kvm_has_sve(kern_hyp_va((vcpu)->kvm)) +#else +#define vcpu_has_sve(vcpu) kvm_has_sve((vcpu)->kvm) +#endif #ifdef CONFIG_ARM64_PTR_AUTH #define vcpu_has_ptrauth(vcpu) \ ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \ cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \ - vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH)) + (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || \ + vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))) #else #define vcpu_has_ptrauth(vcpu) false #endif @@ -1307,6 +1316,13 @@ DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data); &this_cpu_ptr_hyp_sym(kvm_host_data)->f) #endif +#define host_data_test_flag(flag) \ + (test_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))) +#define host_data_set_flag(flag) \ + set_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)) +#define host_data_clear_flag(flag) \ + clear_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)) + /* Check whether the FP regs are owned by the guest */ static inline bool guest_owns_fp_regs(void) { @@ -1332,15 +1348,22 @@ static inline bool kvm_system_needs_idmapped_vectors(void) static inline void kvm_arch_sync_events(struct kvm *kvm) {} -void kvm_arm_init_debug(void); -void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu); -void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); -void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); -void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); +void kvm_init_host_debug_data(void); +void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu); +void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu); +void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu); +void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val); #define kvm_vcpu_os_lock_enabled(vcpu) \ (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK)) +#define kvm_debug_regs_in_use(vcpu) \ + ((vcpu)->arch.debug_owner != VCPU_DEBUG_FREE) +#define kvm_host_owns_debug_regs(vcpu) \ + ((vcpu)->arch.debug_owner == VCPU_DEBUG_HOST_OWNED) +#define kvm_guest_owns_debug_regs(vcpu) \ + ((vcpu)->arch.debug_owner == VCPU_DEBUG_GUEST_OWNED) + int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, @@ -1367,14 +1390,13 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) return (!has_vhe() && attr->exclude_host); } -/* Flags for host debug state */ -void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu); -void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu); - #ifdef CONFIG_KVM void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr); void kvm_clr_pmu_events(u64 clr); bool kvm_set_pmuserenr(u64 val); +void kvm_enable_trbe(void); +void kvm_disable_trbe(void); +void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest); #else static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {} static inline void kvm_clr_pmu_events(u64 clr) {} @@ -1382,6 +1404,9 @@ static inline bool kvm_set_pmuserenr(u64 val) { return false; } +static inline void kvm_enable_trbe(void) {} +static inline void kvm_disable_trbe(void) {} +static inline 
void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest) {} #endif void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu); @@ -1422,6 +1447,7 @@ static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature) return test_bit(feature, ka->vcpu_features); } +#define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f)) #define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f)) #define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 66d93e320ec8..b98ac6aa631f 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -139,6 +139,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v)))) +extern u32 __hyp_va_bits; + /* * We currently support using a VM-specified IPA size. For backward * compatibility, the default IPA size is fixed to 40bits. @@ -353,6 +355,22 @@ static inline bool kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) return &kvm->arch.mmu != mmu; } +static inline void kvm_fault_lock(struct kvm *kvm) +{ + if (is_protected_kvm_enabled()) + write_lock(&kvm->mmu_lock); + else + read_lock(&kvm->mmu_lock); +} + +static inline void kvm_fault_unlock(struct kvm *kvm) +{ + if (is_protected_kvm_enabled()) + write_unlock(&kvm->mmu_lock); + else + read_unlock(&kvm->mmu_lock); +} + #ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS void kvm_s2_ptdump_create_debugfs(struct kvm *kvm); #else diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h index 6cd08198bf19..56c4bcd35e2e 100644 --- a/arch/arm64/include/asm/kvm_nested.h +++ b/arch/arm64/include/asm/kvm_nested.h @@ -64,6 +64,7 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0) } extern bool forward_smc_trap(struct kvm_vcpu *vcpu); +extern bool forward_debug_exception(struct kvm_vcpu *vcpu); extern void kvm_init_nested(struct kvm *kvm); extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu); extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu); @@ -186,7 +187,7 @@ static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vpcu, u32 instr) return true; } -int kvm_init_nv_sysregs(struct kvm *kvm); +int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu); #ifdef CONFIG_ARM64_PTR_AUTH bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr); diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index aab04097b505..6b9d274052c7 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -412,15 +412,20 @@ static inline bool kvm_pgtable_walk_lock_held(void) * be used instead of block mappings. 
*/ struct kvm_pgtable { - u32 ia_bits; - s8 start_level; - kvm_pteref_t pgd; - struct kvm_pgtable_mm_ops *mm_ops; - - /* Stage-2 only */ - struct kvm_s2_mmu *mmu; - enum kvm_pgtable_stage2_flags flags; - kvm_pgtable_force_pte_cb_t force_pte_cb; + union { + struct rb_root pkvm_mappings; + struct { + u32 ia_bits; + s8 start_level; + kvm_pteref_t pgd; + struct kvm_pgtable_mm_ops *mm_ops; + + /* Stage-2 only */ + enum kvm_pgtable_stage2_flags flags; + kvm_pgtable_force_pte_cb_t force_pte_cb; + }; + }; + struct kvm_s2_mmu *mmu; }; /** @@ -526,8 +531,11 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, enum kvm_pgtable_stage2_flags flags, kvm_pgtable_force_pte_cb_t force_pte_cb); -#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \ - __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL) +static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, + struct kvm_pgtable_mm_ops *mm_ops) +{ + return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL); +} /** * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table. @@ -669,13 +677,15 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size); * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry. * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). * @addr: Intermediate physical address to identify the page-table entry. + * @flags: Flags to control the page-table walk (ex. a shared walk) * * The offset of @addr within a page is ignored. * * If there is a valid, leaf page-table entry used to translate @addr, then * set the access flag in that entry. */ -void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr); +void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr, + enum kvm_pgtable_walk_flags flags); /** * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access @@ -705,6 +715,7 @@ bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). * @addr: Intermediate physical address to identify the page-table entry. * @prot: Additional permissions to grant for the mapping. + * @flags: Flags to control the page-table walk (ex. a shared walk) * * The offset of @addr within a page is ignored. * @@ -717,7 +728,8 @@ bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, * Return: 0 on success, negative error code on failure. */ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, - enum kvm_pgtable_prot prot); + enum kvm_pgtable_prot prot, + enum kvm_pgtable_walk_flags flags); /** * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h index cd56acd9a842..eb65f12e81d9 100644 --- a/arch/arm64/include/asm/kvm_pkvm.h +++ b/arch/arm64/include/asm/kvm_pkvm.h @@ -20,6 +20,31 @@ int pkvm_init_host_vm(struct kvm *kvm); int pkvm_create_hyp_vm(struct kvm *kvm); void pkvm_destroy_hyp_vm(struct kvm *kvm); +/* + * This functions as an allow-list of protected VM capabilities. + * Features not explicitly allowed by this function are denied. 
+ */ +static inline bool kvm_pvm_ext_allowed(long ext) +{ + switch (ext) { + case KVM_CAP_IRQCHIP: + case KVM_CAP_ARM_PSCI: + case KVM_CAP_ARM_PSCI_0_2: + case KVM_CAP_NR_VCPUS: + case KVM_CAP_MAX_VCPUS: + case KVM_CAP_MAX_VCPU_ID: + case KVM_CAP_MSI_DEVID: + case KVM_CAP_ARM_VM_IPA_SIZE: + case KVM_CAP_ARM_PMU_V3: + case KVM_CAP_ARM_SVE: + case KVM_CAP_ARM_PTRAUTH_ADDRESS: + case KVM_CAP_ARM_PTRAUTH_GENERIC: + return true; + default: + return false; + } +} + extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); @@ -137,4 +162,30 @@ static inline size_t pkvm_host_sve_state_size(void) SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl))); } +struct pkvm_mapping { + struct rb_node node; + u64 gfn; + u64 pfn; +}; + +int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, + struct kvm_pgtable_mm_ops *mm_ops); +void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt); +int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, + enum kvm_pgtable_prot prot, void *mc, + enum kvm_pgtable_walk_flags flags); +int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size); +int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size); +int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size); +bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold); +int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot, + enum kvm_pgtable_walk_flags flags); +void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr, + enum kvm_pgtable_walk_flags flags); +int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, + struct kvm_mmu_memory_cache *mc); +void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level); +kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level, + enum kvm_pgtable_prot prot, void *mc, + bool force_pte); #endif /* __ARM64_KVM_PKVM_H__ */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 8b9f33cf561b..717829df294e 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -145,13 +145,16 @@ #define OVERFLOW_STACK_SIZE SZ_4K +#define NVHE_STACK_SHIFT PAGE_SHIFT +#define NVHE_STACK_SIZE (UL(1) << NVHE_STACK_SHIFT) + /* * With the minimum frame size of [x29, x30], exactly half the combined * sizes of the hyp and overflow stacks is the maximum size needed to * save the unwinded stacktrace; plus an additional entry to delimit the * end. */ -#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long)) +#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long)) /* * Alignment of kernel segments (e.g. .text, .data). 
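As an aside on the pkvm_mapping structure added to kvm_pkvm.h a few hunks above: in protected mode these entries live in the rb_root that replaces the page-table fields of struct kvm_pgtable (see the union in kvm_pgtable.h earlier). A rough sketch of a gfn-keyed lookup over such a tree might look as follows; the helper name is hypothetical and the single-gfn keying is an assumption drawn from the struct layout, not kernel code::

    #include <linux/rbtree.h>

    /* Hypothetical helper, assuming the tree is keyed by gfn alone. */
    static struct pkvm_mapping *pkvm_mapping_find(struct rb_root *root,
                                                  u64 gfn)
    {
            struct rb_node *node = root->rb_node;

            while (node) {
                    struct pkvm_mapping *m =
                            rb_entry(node, struct pkvm_mapping, node);

                    if (gfn < m->gfn)
                            node = node->rb_left;
                    else if (gfn > m->gfn)
                            node = node->rb_right;
                    else
                            return m;       /* gfn -> pfn mapping found */
            }
            return NULL;
    }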
diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h index 44759281d0d4..171f9edef49f 100644 --- a/arch/arm64/include/asm/stacktrace/nvhe.h +++ b/arch/arm64/include/asm/stacktrace/nvhe.h @@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state, DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info); -DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); +DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base); void kvm_nvhe_dump_backtrace(unsigned long hyp_offset); diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b8303a83c0bf..05ea5223d2d5 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -283,8 +283,6 @@ #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) -#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) - #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) #define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) @@ -477,6 +475,7 @@ #define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0) #define SYS_CNTPCT_EL0 sys_reg(3, 3, 14, 0, 1) +#define SYS_CNTVCT_EL0 sys_reg(3, 3, 14, 0, 2) #define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5) #define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6) @@ -484,14 +483,17 @@ #define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1) #define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2) +#define SYS_CNTV_TVAL_EL0 sys_reg(3, 3, 14, 3, 0) #define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1) #define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2) #define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0) #define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1) #define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0) +#define SYS_AARCH32_CNTVCT sys_reg(0, 1, 0, 14, 0) #define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0) #define SYS_AARCH32_CNTPCTSS sys_reg(0, 8, 0, 14, 0) +#define SYS_AARCH32_CNTVCTSS sys_reg(0, 9, 0, 14, 0) #define __PMEV_op2(n) ((n) & 0x7) #define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3)) @@ -519,7 +521,6 @@ #define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0) #define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) -#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) #define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) @@ -983,15 +984,6 @@ /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ #define SYS_MPIDR_SAFE_VAL (BIT(31)) -#define TRFCR_ELx_TS_SHIFT 5 -#define TRFCR_ELx_TS_MASK ((0x3UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_EL2_CX BIT(3) -#define TRFCR_ELx_ExTRE BIT(1) -#define TRFCR_ELx_E0TRE BIT(0) - /* GIC Hypervisor interface registers */ /* ICH_MISR_EL2 bit definitions */ #define ICH_MISR_EOI (1 << 0) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index a78f247029ae..7ce555862895 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -787,5 +787,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { }, #endif { + .desc = "Broken CNTVOFF_EL2", + .capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF, + ERRATA_MIDR_RANGE_LIST(((const struct midr_range[]) { + MIDR_ALL_VERSIONS(MIDR_QCOM_ORYON_X1), + {} + })), + }, + { } }; diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 
8f5422ed1b75..ef3a69cc398e 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -105,6 +105,9 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors); KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); +/* Static key which is set if CNTVOFF_EL2 is unusable */ +KVM_NVHE_ALIAS(broken_cntvoff_key); + /* EL2 exception handling */ KVM_NVHE_ALIAS(__start___kvm_ex_table); KVM_NVHE_ALIAS(__stop___kvm_ex_table); diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 1215df590418..d3d243366536 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -30,6 +30,7 @@ static u32 host_vtimer_irq_flags; static u32 host_ptimer_irq_flags; static DEFINE_STATIC_KEY_FALSE(has_gic_active_state); +DEFINE_STATIC_KEY_FALSE(broken_cntvoff_key); static const u8 default_ppi[] = { [TIMER_PTIMER] = 30, @@ -101,21 +102,6 @@ u64 timer_get_cval(struct arch_timer_context *ctxt) } } -static u64 timer_get_offset(struct arch_timer_context *ctxt) -{ - u64 offset = 0; - - if (!ctxt) - return 0; - - if (ctxt->offset.vm_offset) - offset += *ctxt->offset.vm_offset; - if (ctxt->offset.vcpu_offset) - offset += *ctxt->offset.vcpu_offset; - - return offset; -} - static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl) { struct kvm_vcpu *vcpu = ctxt->vcpu; @@ -441,11 +427,30 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu) regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER; } +static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level) +{ + /* + * Paper over NV2 brokenness by publishing the interrupt status + * bit. This still results in a poor quality of emulation (guest + * writes will have no effect until the next exit). + * + * But hey, it's fast, right? + */ + if (is_hyp_ctxt(ctx->vcpu) && + (ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) { + unsigned long val = timer_get_ctl(ctx); + __assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level); + timer_set_ctl(ctx, val); + } +} + static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, struct arch_timer_context *timer_ctx) { int ret; + kvm_timer_update_status(timer_ctx, new_level); + timer_ctx->irq.level = new_level; trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx), timer_ctx->irq.level); @@ -471,6 +476,8 @@ static void timer_emulate(struct arch_timer_context *ctx) return; } + kvm_timer_update_status(ctx, should_fire); + /* * If the timer can fire now, we don't need to have a soft timer * scheduled for the future. 
If the timer cannot fire at all, @@ -513,7 +520,12 @@ static void timer_save_state(struct arch_timer_context *ctx) case TIMER_VTIMER: case TIMER_HVTIMER: timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL)); - timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL)); + cval = read_sysreg_el0(SYS_CNTV_CVAL); + + if (has_broken_cntvoff()) + cval -= timer_get_offset(ctx); + + timer_set_cval(ctx, cval); /* Disable the timer */ write_sysreg_el0(0, SYS_CNTV_CTL); @@ -618,8 +630,15 @@ static void timer_restore_state(struct arch_timer_context *ctx) case TIMER_VTIMER: case TIMER_HVTIMER: - set_cntvoff(timer_get_offset(ctx)); - write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL); + cval = timer_get_cval(ctx); + offset = timer_get_offset(ctx); + if (has_broken_cntvoff()) { + set_cntvoff(0); + cval += offset; + } else { + set_cntvoff(offset); + } + write_sysreg_el0(cval, SYS_CNTV_CVAL); isb(); write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL); break; @@ -762,7 +781,7 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu, static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map) { - bool tpt, tpc; + bool tvt, tpt, tvc, tpc, tvt02, tpt02; u64 clr, set; /* @@ -777,7 +796,29 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map) * within this function, reality kicks in and we start adding * traps based on emulation requirements. */ - tpt = tpc = false; + tvt = tpt = tvc = tpc = false; + tvt02 = tpt02 = false; + + /* + * NV2 badly breaks the timer semantics by redirecting accesses to + * the EL1 timer state to memory, so let's call ECV to the rescue if + * available: we trap all CNT{P,V}_{CTL,CVAL,TVAL}_EL0 accesses. + * + * The treatment varies slightly depending on whether we run a nVHE or + * VHE guest: nVHE will use the _EL0 registers directly, while VHE + * will use the _EL02 accessors. This translates into different trap + * bits. + * + * None of the trapping is required when running in non-HYP context, + * unless required by the L1 hypervisor settings once we advertise + * ECV+NV in the guest, or if we need trapping for other reasons. + */ + if (cpus_have_final_cap(ARM64_HAS_ECV) && is_hyp_ctxt(vcpu)) { + if (vcpu_el2_e2h_is_set(vcpu)) + tvt02 = tpt02 = true; + else + tvt = tpt = true; + } /* * We have two possibility to deal with a physical offset: @@ -793,9 +834,20 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map) tpt = tpc = true; /* + * For the poor sods that could not correctly subtract one value + * from another, trap the full virtual timer and counter. + */ + if (has_broken_cntvoff() && timer_get_offset(map->direct_vtimer)) + tvt = tvc = true; + + /* * Apply the enable bits that the guest hypervisor has requested for * its own guest. We can only add traps that wouldn't have been set * above. + * Implementation choices: we do not support NV when E2H=0 in the + * guest, and we don't support configuration where E2H is writable + * by the guest (either FEAT_VHE or FEAT_E2H0 is implemented, but + * not both). This simplifies the handling of the EL1NV* bits.
*/ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2); @@ -806,6 +858,9 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map) tpt |= !(val & (CNTHCTL_EL1PCEN << 10)); tpc |= !(val & (CNTHCTL_EL1PCTEN << 10)); + + tpt02 |= (val & CNTHCTL_EL1NVPCT); + tvt02 |= (val & CNTHCTL_EL1NVVCT); } /* @@ -817,6 +872,10 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map) assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr); assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr); + assign_clear_set_bit(tvt, CNTHCTL_EL1TVT, clr, set); + assign_clear_set_bit(tvc, CNTHCTL_EL1TVCT, clr, set); + assign_clear_set_bit(tvt02, CNTHCTL_EL1NVVCT, clr, set); + assign_clear_set_bit(tpt02, CNTHCTL_EL1NVPCT, clr, set); /* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */ sysreg_clear_set(cnthctl_el2, clr, set); @@ -905,6 +964,54 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) kvm_timer_blocking(vcpu); } +void kvm_timer_sync_nested(struct kvm_vcpu *vcpu) +{ + /* + * When NV2 is on, guest hypervisors have their EL1 timer register + * accesses redirected to the VNCR page. Any guest action taken on + * the timer is postponed until the next exit, leading to a very + * poor quality of emulation. + * + * This is an unmitigated disaster, only papered over by FEAT_ECV, + * which allows trapping of the timer registers even with NV2. + * Still, this is still worse than FEAT_NV on its own. Meh. + */ + if (!vcpu_el2_e2h_is_set(vcpu)) { + if (cpus_have_final_cap(ARM64_HAS_ECV)) + return; + + /* + * A non-VHE guest hypervisor doesn't have any direct access + * to its timers: the EL2 registers trap (and the HW is + * fully emulated), while the EL0 registers access memory + * despite the access being notionally direct. Boo. + * + * We update the hardware timer registers with the + * latest value written by the guest to the VNCR page + * and let the hardware take care of the rest. + */ + write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CTL_EL0), SYS_CNTV_CTL); + write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0), SYS_CNTV_CVAL); + write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CTL_EL0), SYS_CNTP_CTL); + write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0), SYS_CNTP_CVAL); + } else { + /* + * For a VHE guest hypervisor, the EL2 state is directly + * stored in the host EL1 timers, while the emulated EL0 + * state is stored in the VNCR page. The latter could have + * been updated behind our back, and we must reset the + * emulation of the timers. + */ + struct timer_map map; + get_timer_map(vcpu, &map); + + soft_timer_cancel(&map.emul_vtimer->hrtimer); + soft_timer_cancel(&map.emul_ptimer->hrtimer); + timer_emulate(map.emul_vtimer); + timer_emulate(map.emul_ptimer); + } +} + /* * With a userspace irqchip we have to check if the guest de-asserted the * timer and if so, unmask the timer irq signal on the host interrupt @@ -1363,6 +1470,37 @@ static int kvm_irq_init(struct arch_timer_kvm_info *info) return 0; } +static void kvm_timer_handle_errata(void) +{ + u64 mmfr0, mmfr1, mmfr4; + + /* + * CNTVOFF_EL2 is broken on some implementations. For those, we trap + * all virtual timer/counter accesses, requiring FEAT_ECV. + * + * However, a hypervisor supporting nesting is likely to mitigate the + * erratum at L0, and not require other levels to mitigate it (which + * would otherwise be a terrible performance sink due to trap + * amplification). 
+ * + * Given that the affected HW implements both FEAT_VHE and FEAT_E2H0, + * and that NV is likely not to (because of limitations of the + * architecture), only enable the workaround when FEAT_VHE and + * FEAT_E2H0 are both detected. Time will tell if this actually holds. + */ + mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + mmfr4 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR4_EL1); + if (SYS_FIELD_GET(ID_AA64MMFR1_EL1, VH, mmfr1) && + !SYS_FIELD_GET(ID_AA64MMFR4_EL1, E2H0, mmfr4) && + SYS_FIELD_GET(ID_AA64MMFR0_EL1, ECV, mmfr0) && + (has_vhe() || has_hvhe()) && + cpus_have_final_cap(ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF)) { + static_branch_enable(&broken_cntvoff_key); + kvm_info("Broken CNTVOFF_EL2, trapping virtual timer\n"); + } +} + int __init kvm_timer_hyp_init(bool has_gic) { struct arch_timer_kvm_info *info; @@ -1431,6 +1569,7 @@ int __init kvm_timer_hyp_init(bool has_gic) goto out_free_vtimer_irq; } + kvm_timer_handle_errata(); return 0; out_free_ptimer_irq: diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 7b2735ad32e9..646e806c6ca6 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -61,7 +61,7 @@ static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTR DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector); -DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); +DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base); DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); @@ -80,31 +80,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; } -/* - * This functions as an allow-list of protected VM capabilities. - * Features not explicitly allowed by this function are denied. 
- */ -static bool pkvm_ext_allowed(struct kvm *kvm, long ext) -{ - switch (ext) { - case KVM_CAP_IRQCHIP: - case KVM_CAP_ARM_PSCI: - case KVM_CAP_ARM_PSCI_0_2: - case KVM_CAP_NR_VCPUS: - case KVM_CAP_MAX_VCPUS: - case KVM_CAP_MAX_VCPU_ID: - case KVM_CAP_MSI_DEVID: - case KVM_CAP_ARM_VM_IPA_SIZE: - case KVM_CAP_ARM_PMU_V3: - case KVM_CAP_ARM_SVE: - case KVM_CAP_ARM_PTRAUTH_ADDRESS: - case KVM_CAP_ARM_PTRAUTH_GENERIC: - return true; - default: - return false; - } -} - int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { @@ -113,7 +88,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, if (cap->flags) return -EINVAL; - if (kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, cap->cap)) + if (kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(cap->cap)) return -EINVAL; switch (cap->cap) { @@ -311,7 +286,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; - if (kvm && kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, ext)) + if (kvm && kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(ext)) return 0; switch (ext) { @@ -476,8 +451,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) kvm_pmu_vcpu_init(vcpu); - kvm_arm_reset_debug_ptr(vcpu); - kvm_arm_pvtime_vcpu_init(&vcpu->arch); vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; @@ -502,7 +475,10 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { - kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); + if (!is_protected_kvm_enabled()) + kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); + else + free_hyp_memcache(&vcpu->arch.pkvm_memcache); kvm_timer_vcpu_terminate(vcpu); kvm_pmu_vcpu_destroy(vcpu); kvm_vgic_vcpu_destroy(vcpu); @@ -574,6 +550,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) struct kvm_s2_mmu *mmu; int *last_ran; + if (is_protected_kvm_enabled()) + goto nommu; + if (vcpu_has_nv(vcpu)) kvm_vcpu_load_hw_mmu(vcpu); @@ -594,10 +573,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) *last_ran = vcpu->vcpu_idx; } +nommu: vcpu->cpu = cpu; kvm_vgic_load(vcpu); kvm_timer_vcpu_load(vcpu); + kvm_vcpu_load_debug(vcpu); if (has_vhe()) kvm_vcpu_load_vhe(vcpu); kvm_arch_vcpu_load_fp(vcpu); @@ -617,7 +598,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu_set_pauth_traps(vcpu); - kvm_arch_vcpu_load_debug_state_flags(vcpu); + if (is_protected_kvm_enabled()) { + kvm_call_hyp_nvhe(__pkvm_vcpu_load, + vcpu->kvm->arch.pkvm.handle, + vcpu->vcpu_idx, vcpu->arch.hcr_el2); + kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, + &vcpu->arch.vgic_cpu.vgic_v3); + } if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) vcpu_set_on_unsupported_cpu(vcpu); @@ -625,7 +612,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { - kvm_arch_vcpu_put_debug_state_flags(vcpu); + if (is_protected_kvm_enabled()) { + kvm_call_hyp(__vgic_v3_save_vmcr_aprs, + &vcpu->arch.vgic_cpu.vgic_v3); + kvm_call_hyp_nvhe(__pkvm_vcpu_put); + } + + kvm_vcpu_put_debug(vcpu); kvm_arch_vcpu_put_fp(vcpu); if (has_vhe()) kvm_vcpu_put_vhe(vcpu); @@ -808,8 +801,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) kvm_init_mpidr_data(kvm); - kvm_arm_vcpu_init_debug(vcpu); - if (likely(irqchip_in_kernel(kvm))) { /* * Map the VGIC hardware resources before running a vcpu the @@ -1187,7 +1178,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) continue; } - kvm_arm_setup_debug(vcpu); kvm_arch_vcpu_ctxflush_fp(vcpu); /************************************************************** @@ -1204,8 +1194,6 @@ int 
kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * Back from guest *************************************************************/ - kvm_arm_clear_debug(vcpu); - /* * We must sync the PMU state before the vgic state so * that the vgic can properly sample the updated state of the @@ -1228,6 +1216,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (unlikely(!irqchip_in_kernel(vcpu->kvm))) kvm_timer_sync_user(vcpu); + if (is_hyp_ctxt(vcpu)) + kvm_timer_sync_nested(vcpu); + kvm_arch_vcpu_ctxsync_fp(vcpu); /* @@ -1571,7 +1562,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, } vcpu_reset_hcr(vcpu); - vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu); /* * Handle the "start in power-off" case. @@ -2109,6 +2099,7 @@ static void cpu_set_hyp_vector(void) static void cpu_hyp_init_context(void) { kvm_init_host_cpu_context(host_data_ptr(host_ctxt)); + kvm_init_host_debug_data(); if (!is_kernel_in_hyp_mode()) cpu_init_hyp_mode(); @@ -2117,7 +2108,6 @@ static void cpu_hyp_init_context(void) static void cpu_hyp_init_features(void) { cpu_set_hyp_vector(); - kvm_arm_init_debug(); if (is_kernel_in_hyp_mode()) kvm_timer_init_vhe(); @@ -2339,7 +2329,7 @@ static void __init teardown_hyp_mode(void) free_hyp_pgds(); for_each_possible_cpu(cpu) { - free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); + free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT); free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); if (free_sve) { @@ -2527,15 +2517,15 @@ static int __init init_hyp_mode(void) * Allocate stack pages for Hypervisor-mode */ for_each_possible_cpu(cpu) { - unsigned long stack_page; + unsigned long stack_base; - stack_page = __get_free_page(GFP_KERNEL); - if (!stack_page) { + stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT); + if (!stack_base) { err = -ENOMEM; goto out_err; } - per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; + per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base; } /* @@ -2604,9 +2594,9 @@ static int __init init_hyp_mode(void) */ for_each_possible_cpu(cpu) { struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); - char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); + char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu); - err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va); + err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va); if (err) { kvm_err("Cannot map hyp stack\n"); goto out_err; @@ -2618,7 +2608,7 @@ static int __init init_hyp_mode(void) * __hyp_pa() won't do the right thing there, since the stack * has been mapped in the flexible private VA space.
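One constant drives both sides of this change: the allocation order here and the overflow check in the hyp entry path (see the host.S hunk later in this diff) are both derived from NVHE_STACK_SHIFT. Assuming, purely for illustration, NVHE_STACK_SHIFT = PAGE_SHIFT + 1 on a 4K-page kernel::

    unsigned int order = NVHE_STACK_SHIFT - PAGE_SHIFT;  /* 13 - 12 = 1 */
    unsigned long size = 1UL << NVHE_STACK_SHIFT;        /* 8K, i.e. two pages */

Because each stack is mapped in the private hyp VA space with a guard region below it, bit NVHE_STACK_SHIFT of a valid SP is always 1, and an overflow into the guard region clears it; that is what the single tbz in host.S tests without corrupting a general-purpose register.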
*/ - params->stack_pa = __pa(stack_page); + params->stack_pa = __pa(stack_base); } for_each_possible_cpu(cpu) { diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index ce8886122ed3..0e4c805e7e89 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -3,7 +3,8 @@ * Debug and Guest Debug support * * Copyright (C) 2015 - Linaro Ltd - * Author: Alex Bennée <alex.bennee@linaro.org> + * Authors: Alex Bennée <alex.bennee@linaro.org> + * Oliver Upton <oliver.upton@linux.dev> */ #include <linux/kvm_host.h> @@ -14,72 +15,6 @@ #include <asm/kvm_arm.h> #include <asm/kvm_emulate.h> -#include "trace.h" - -/* These are the bits of MDSCR_EL1 we may manipulate */ -#define MDSCR_EL1_DEBUG_MASK (DBG_MDSCR_SS | \ - DBG_MDSCR_KDE | \ - DBG_MDSCR_MDE) - -static DEFINE_PER_CPU(u64, mdcr_el2); - -/* - * save/restore_guest_debug_regs - * - * For some debug operations we need to tweak some guest registers. As - * a result we need to save the state of those registers before we - * make those modifications. - * - * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled - * after we have restored the preserved value to the main context. - * - * When single-step is enabled by userspace, we tweak PSTATE.SS on every - * guest entry. Preserve PSTATE.SS so we can restore the original value - * for the vcpu after the single-step is disabled. - */ -static void save_guest_debug_regs(struct kvm_vcpu *vcpu) -{ - u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1); - - vcpu->arch.guest_debug_preserved.mdscr_el1 = val; - - trace_kvm_arm_set_dreg32("Saved MDSCR_EL1", - vcpu->arch.guest_debug_preserved.mdscr_el1); - - vcpu->arch.guest_debug_preserved.pstate_ss = - (*vcpu_cpsr(vcpu) & DBG_SPSR_SS); -} - -static void restore_guest_debug_regs(struct kvm_vcpu *vcpu) -{ - u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1; - - vcpu_write_sys_reg(vcpu, val, MDSCR_EL1); - - trace_kvm_arm_set_dreg32("Restored MDSCR_EL1", - vcpu_read_sys_reg(vcpu, MDSCR_EL1)); - - if (vcpu->arch.guest_debug_preserved.pstate_ss) - *vcpu_cpsr(vcpu) |= DBG_SPSR_SS; - else - *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS; -} - -/** - * kvm_arm_init_debug - grab what we need for debug - * - * Currently the sole task of this function is to retrieve the initial - * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has - * presumably been set-up by some knowledgeable bootcode. - * - * It is called once per-cpu during CPU hyp initialisation. - */ - -void kvm_arm_init_debug(void) -{ - __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2)); -} - /** * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value * @@ -95,11 +30,14 @@ void kvm_arm_init_debug(void) */ static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu) { + preempt_disable(); + /* * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK * to disable guest access to the profiling and trace buffers */ - vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK; + vcpu->arch.mdcr_el2 = FIELD_PREP(MDCR_EL2_HPMN, + *host_data_ptr(nr_event_counters)); vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM | MDCR_EL2_TPMS | MDCR_EL2_TTRF | @@ -113,233 +51,215 @@ static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu) vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE; /* - * Trap debug register access when one of the following is true: - * - Userspace is using the hardware to debug the guest - * (KVM_GUESTDBG_USE_HW is set). - * - The guest is not using debug (DEBUG_DIRTY clear). - * - The guest has enabled the OS Lock (debug exceptions are blocked). 
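The three DEBUG_DIRTY-era conditions quoted above collapse into a single ownership test in the hunk that follows. The predicates are presumably trivial accessors over a per-vCPU owner field, along these lines (a sketch; the real definitions live in a header outside this diff)::

    enum vcpu_debug_owner {
            VCPU_DEBUG_FREE,
            VCPU_DEBUG_HOST_OWNED,
            VCPU_DEBUG_GUEST_OWNED,
    };

    static inline bool kvm_host_owns_debug_regs(struct kvm_vcpu *vcpu)
    {
            return vcpu->arch.debug_owner == VCPU_DEBUG_HOST_OWNED;
    }

    static inline bool kvm_guest_owns_debug_regs(struct kvm_vcpu *vcpu)
    {
            return vcpu->arch.debug_owner == VCPU_DEBUG_GUEST_OWNED;
    }

    static inline bool kvm_debug_regs_in_use(struct kvm_vcpu *vcpu)
    {
            return vcpu->arch.debug_owner != VCPU_DEBUG_FREE;
    }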
+ * Trap debug registers if the guest doesn't have ownership of them. */ - if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) || - !vcpu_get_flag(vcpu, DEBUG_DIRTY) || - kvm_vcpu_os_lock_enabled(vcpu)) + if (!kvm_guest_owns_debug_regs(vcpu)) vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA; - trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2); -} + /* Write MDCR_EL2 directly if we're already at EL2 */ + if (has_vhe()) + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); -/** - * kvm_arm_vcpu_init_debug - setup vcpu debug traps - * - * @vcpu: the vcpu pointer - * - * Set vcpu initial mdcr_el2 value. - */ -void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu) -{ - preempt_disable(); - kvm_arm_setup_mdcr_el2(vcpu); preempt_enable(); } -/** - * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state - * @vcpu: the vcpu pointer - */ - -void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) +void kvm_init_host_debug_data(void) { - vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state; + u64 dfr0 = read_sysreg(id_aa64dfr0_el1); + + if (cpuid_feature_extract_signed_field(dfr0, ID_AA64DFR0_EL1_PMUVer_SHIFT) > 0) + *host_data_ptr(nr_event_counters) = FIELD_GET(ARMV8_PMU_PMCR_N, + read_sysreg(pmcr_el0)); + + *host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0); + *host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0); + + if (has_vhe()) + return; + + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) && + !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P)) + host_data_set_flag(HAS_SPE); + + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) { + /* Force disable trace in protected mode in case of no TRBE */ + if (is_protected_kvm_enabled()) + host_data_set_flag(EL1_TRACING_CONFIGURED); + + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) && + !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P)) + host_data_set_flag(HAS_TRBE); + } } -/** - * kvm_arm_setup_debug - set up debug related stuff +/* + * Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host + * has taken over MDSCR_EL1. * - * @vcpu: the vcpu pointer + * - Userspace is single-stepping the guest, and MDSCR_EL1.SS is forced to 1. * - * This is called before each entry into the hypervisor to setup any - * debug related registers. + * - Userspace is using the breakpoint/watchpoint registers to debug the + * guest, and MDSCR_EL1.MDE is forced to 1. * - * Additionally, KVM only traps guest accesses to the debug registers if - * the guest is not actively using them (see the DEBUG_DIRTY - * flag on vcpu->arch.iflags). Since the guest must not interfere - * with the hardware state when debugging the guest, we must ensure that - * trapping is enabled whenever we are debugging the guest using the - * debug registers. + * - The guest has enabled the OS Lock, and KVM is forcing MDSCR_EL1.MDE to 0, + * masking all debug exceptions affected by the OS Lock. */ - -void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) +static void setup_external_mdscr(struct kvm_vcpu *vcpu) { - unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2; + /* + * Use the guest's MDSCR_EL1 as a starting point, since there are + * several other features controlled by MDSCR_EL1 that are not relevant + * to the host. + * + * Clear the bits that KVM may use which also satisfies emulation of + * the OS Lock as MDSCR_EL1.MDE is cleared. 
+ */ + u64 mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1) & ~(MDSCR_EL1_SS | + MDSCR_EL1_MDE | + MDSCR_EL1_KDE); - trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug); + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + mdscr |= MDSCR_EL1_SS; - kvm_arm_setup_mdcr_el2(vcpu); + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) + mdscr |= MDSCR_EL1_MDE | MDSCR_EL1_KDE; - /* Check if we need to use the debug registers. */ + vcpu->arch.external_mdscr_el1 = mdscr; +} + +void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu) +{ + u64 mdscr; + + /* Must be called before kvm_vcpu_load_vhe() */ + KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm); + + /* + * Determine which of the possible debug states we're in: + * + * - VCPU_DEBUG_HOST_OWNED: KVM has taken ownership of the guest's + * breakpoint/watchpoint registers, or needs to use MDSCR_EL1 to do + * software step or emulate the effects of the OS Lock being enabled. + * + * - VCPU_DEBUG_GUEST_OWNED: The guest has debug exceptions enabled, and + * the breakpoint/watchpoint registers need to be loaded eagerly. + * + * - VCPU_DEBUG_FREE: Neither of the above apply, no breakpoint/watchpoint + * context needs to be loaded on the CPU. + */ if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) { - /* Save guest debug state */ - save_guest_debug_regs(vcpu); + vcpu->arch.debug_owner = VCPU_DEBUG_HOST_OWNED; + setup_external_mdscr(vcpu); /* - * Single Step (ARM ARM D2.12.3 The software step state - * machine) - * - * If we are doing Single Step we need to manipulate - * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the - * step has occurred the hypervisor will trap the - * debug exception and we return to userspace. - * - * If the guest attempts to single step its userspace - * we would have to deal with a trapped exception - * while in the guest kernel. Because this would be - * hard to unwind we suppress the guest's ability to - * do so by masking MDSCR_EL.SS. - * - * This confuses guest debuggers which use - * single-step behind the scenes but everything - * returns to normal once the host is no longer - * debugging the system. + * Steal the guest's single-step state machine if userspace wants to + * single-step the guest. */ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { - /* - * If the software step state at the last guest exit - * was Active-pending, we don't set DBG_SPSR_SS so - * that the state is maintained (to not run another - * single-step until the pending Software Step - * exception is taken). - */ - if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING)) + if (*vcpu_cpsr(vcpu) & DBG_SPSR_SS) + vcpu_clear_flag(vcpu, GUEST_SS_ACTIVE_PENDING); + else + vcpu_set_flag(vcpu, GUEST_SS_ACTIVE_PENDING); + + if (!vcpu_get_flag(vcpu, HOST_SS_ACTIVE_PENDING)) *vcpu_cpsr(vcpu) |= DBG_SPSR_SS; else *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS; - - mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1); - mdscr |= DBG_MDSCR_SS; - vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1); - } else { - mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1); - mdscr &= ~DBG_MDSCR_SS; - vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1); } + } else { + mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1); - trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu)); - - /* - * HW Breakpoints and watchpoints - * - * We simply switch the debug_ptr to point to our new - * external_debug_state which has been populated by the - * debug ioctl. The existing DEBUG_DIRTY mechanism ensures - * the registers are updated on the world switch.
- */ - if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) { - /* Enable breakpoints/watchpoints */ - mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1); - mdscr |= DBG_MDSCR_MDE; - vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1); - - vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state; - vcpu_set_flag(vcpu, DEBUG_DIRTY); - - trace_kvm_arm_set_regset("BKPTS", get_num_brps(), - &vcpu->arch.debug_ptr->dbg_bcr[0], - &vcpu->arch.debug_ptr->dbg_bvr[0]); - - trace_kvm_arm_set_regset("WAPTS", get_num_wrps(), - &vcpu->arch.debug_ptr->dbg_wcr[0], - &vcpu->arch.debug_ptr->dbg_wvr[0]); - - /* - * The OS Lock blocks debug exceptions in all ELs when it is - * enabled. If the guest has enabled the OS Lock, constrain its - * effects to the guest. Emulate the behavior by clearing - * MDSCR_EL1.MDE. In so doing, we ensure that host debug - * exceptions are unaffected by guest configuration of the OS - * Lock. - */ - } else if (kvm_vcpu_os_lock_enabled(vcpu)) { - mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1); - mdscr &= ~DBG_MDSCR_MDE; - vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1); - } + if (mdscr & (MDSCR_EL1_KDE | MDSCR_EL1_MDE)) + vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED; + else + vcpu->arch.debug_owner = VCPU_DEBUG_FREE; } - BUG_ON(!vcpu->guest_debug && - vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state); - - /* If KDE or MDE are set, perform a full save/restore cycle. */ - if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE)) - vcpu_set_flag(vcpu, DEBUG_DIRTY); - - /* Write mdcr_el2 changes since vcpu_load on VHE systems */ - if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2) - write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); - - trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1)); + kvm_arm_setup_mdcr_el2(vcpu); } -void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) +void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu) { - trace_kvm_arm_clear_debug(vcpu->guest_debug); + if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) + return; /* - * Restore the guest's debug registers if we were using them. + * Save the host's software step state and restore the guest's before + * potentially returning to userspace. */ - if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) { - if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { - if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)) - /* - * Mark the vcpu as ACTIVE_PENDING - * until Software Step exception is taken. - */ - vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING); - } - - restore_guest_debug_regs(vcpu); + if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)) + vcpu_set_flag(vcpu, HOST_SS_ACTIVE_PENDING); + else + vcpu_clear_flag(vcpu, HOST_SS_ACTIVE_PENDING); - /* - * If we were using HW debug we need to restore the - * debug_ptr to the guest debug state. - */ - if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) { - kvm_arm_reset_debug_ptr(vcpu); + if (vcpu_get_flag(vcpu, GUEST_SS_ACTIVE_PENDING)) + *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS; + else + *vcpu_cpsr(vcpu) |= DBG_SPSR_SS; +} - trace_kvm_arm_set_regset("BKPTS", get_num_brps(), - &vcpu->arch.debug_ptr->dbg_bcr[0], - &vcpu->arch.debug_ptr->dbg_bvr[0]); +/* + * Updates ownership of the debug registers after a trapped guest access to a + * breakpoint/watchpoint register. Host ownership of the debug registers is of + * strictly higher priority, and it is the responsibility of the VMM to emulate + * guest debug exceptions in this configuration. 
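For reference, the machinery above juggles the Arm ARM software-step state machine (D2.12) twice, once for the host's use of single-step and once for the guest's own. With MDSCR_EL1.SS set, PSTATE.SS distinguishes the two live states; a hedged sketch (ss_active_pending() is illustrative, not from the patch)::

    /*
     * MDSCR_EL1.SS == 1 && PSTATE.SS == 1: active-not-pending; one
     * instruction retires, then PSTATE.SS drops to 0.
     * MDSCR_EL1.SS == 1 && PSTATE.SS == 0: active-pending; a software
     * step exception is taken before the next instruction.
     */
    static inline bool ss_active_pending(u64 mdscr, u64 pstate)
    {
            return (mdscr & MDSCR_EL1_SS) && !(pstate & DBG_SPSR_SS);
    }

GUEST_SS_ACTIVE_PENDING records the guest's position in this machine while userspace borrows PSTATE.SS; HOST_SS_ACTIVE_PENDING does the same for the host across kvm_vcpu_put_debug().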
+ */ +void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu) +{ + if (kvm_host_owns_debug_regs(vcpu)) + return; - trace_kvm_arm_set_regset("WAPTS", get_num_wrps(), - &vcpu->arch.debug_ptr->dbg_wcr[0], - &vcpu->arch.debug_ptr->dbg_wvr[0]); - } - } + vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED; + kvm_arm_setup_mdcr_el2(vcpu); } -void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) +void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val) { - u64 dfr0; + if (val & OSLAR_EL1_OSLK) + __vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK; + else + __vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK; - /* For VHE, there is nothing to do */ - if (has_vhe()) + preempt_disable(); + kvm_arch_vcpu_put(vcpu); + kvm_arch_vcpu_load(vcpu, smp_processor_id()); + preempt_enable(); +} + +void kvm_enable_trbe(void) +{ + if (has_vhe() || is_protected_kvm_enabled() || + WARN_ON_ONCE(preemptible())) return; - dfr0 = read_sysreg(id_aa64dfr0_el1); - /* - * If SPE is present on this CPU and is available at current EL, - * we may need to check if the host state needs to be saved. - */ - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) && - !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT))) - vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE); + host_data_set_flag(TRBE_ENABLED); +} +EXPORT_SYMBOL_GPL(kvm_enable_trbe); + +void kvm_disable_trbe(void) +{ + if (has_vhe() || is_protected_kvm_enabled() || + WARN_ON_ONCE(preemptible())) + return; - /* Check if we have TRBE implemented and available at the host */ - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) && - !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P)) - vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE); + host_data_clear_flag(TRBE_ENABLED); } +EXPORT_SYMBOL_GPL(kvm_disable_trbe); -void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu) +void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest) { - vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE); - vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE); + if (is_protected_kvm_enabled() || WARN_ON_ONCE(preemptible())) + return; + + if (has_vhe()) { + write_sysreg_s(trfcr_while_in_guest, SYS_TRFCR_EL12); + return; + } + + *host_data_ptr(trfcr_while_in_guest) = trfcr_while_in_guest; + if (read_sysreg_s(SYS_TRFCR_EL1) != trfcr_while_in_guest) + host_data_set_flag(EL1_TRACING_CONFIGURED); + else + host_data_clear_flag(EL1_TRACING_CONFIGURED); } +EXPORT_SYMBOL_GPL(kvm_tracing_set_el1_configuration); diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index f1b7287e1f3c..607d37bab70b 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -89,6 +89,9 @@ enum cgt_group_id { CGT_HCRX_EnFPM, CGT_HCRX_TCR2En, + CGT_CNTHCTL_EL1TVT, + CGT_CNTHCTL_EL1TVCT, + CGT_ICH_HCR_TC, CGT_ICH_HCR_TALL0, CGT_ICH_HCR_TALL1, @@ -124,6 +127,8 @@ enum cgt_group_id { __COMPLEX_CONDITIONS__, CGT_CNTHCTL_EL1PCTEN = __COMPLEX_CONDITIONS__, CGT_CNTHCTL_EL1PTEN, + CGT_CNTHCTL_EL1NVPCT, + CGT_CNTHCTL_EL1NVVCT, CGT_CPTR_TTA, CGT_MDCR_HPMN, @@ -393,6 +398,18 @@ static const struct trap_bits coarse_trap_bits[] = { .mask = HCRX_EL2_TCR2En, .behaviour = BEHAVE_FORWARD_RW, }, + [CGT_CNTHCTL_EL1TVT] = { + .index = CNTHCTL_EL2, + .value = CNTHCTL_EL1TVT, + .mask = CNTHCTL_EL1TVT, + .behaviour = BEHAVE_FORWARD_RW, + }, + [CGT_CNTHCTL_EL1TVCT] = { + .index = CNTHCTL_EL2, + .value = CNTHCTL_EL1TVCT, + .mask = CNTHCTL_EL1TVCT, + .behaviour = BEHAVE_FORWARD_READ, + }, [CGT_ICH_HCR_TC] = { .index = ICH_HCR_EL2, .value = ICH_HCR_TC, 
@@ -487,6 +504,32 @@ static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu) return BEHAVE_FORWARD_RW; } +static bool is_nested_nv2_guest(struct kvm_vcpu *vcpu) +{ + u64 val; + + val = __vcpu_sys_reg(vcpu, HCR_EL2); + return ((val & (HCR_E2H | HCR_TGE | HCR_NV2 | HCR_NV1 | HCR_NV)) == (HCR_E2H | HCR_NV2 | HCR_NV)); +} + +static enum trap_behaviour check_cnthctl_el1nvpct(struct kvm_vcpu *vcpu) +{ + if (!is_nested_nv2_guest(vcpu) || + !(__vcpu_sys_reg(vcpu, CNTHCTL_EL2) & CNTHCTL_EL1NVPCT)) + return BEHAVE_HANDLE_LOCALLY; + + return BEHAVE_FORWARD_RW; +} + +static enum trap_behaviour check_cnthctl_el1nvvct(struct kvm_vcpu *vcpu) +{ + if (!is_nested_nv2_guest(vcpu) || + !(__vcpu_sys_reg(vcpu, CNTHCTL_EL2) & CNTHCTL_EL1NVVCT)) + return BEHAVE_HANDLE_LOCALLY; + + return BEHAVE_FORWARD_RW; +} + static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu) { u64 val = __vcpu_sys_reg(vcpu, CPTR_EL2); @@ -534,6 +577,8 @@ static enum trap_behaviour check_mdcr_hpmn(struct kvm_vcpu *vcpu) static const complex_condition_check ccc[] = { CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten), CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten), + CCC(CGT_CNTHCTL_EL1NVPCT, check_cnthctl_el1nvpct), + CCC(CGT_CNTHCTL_EL1NVVCT, check_cnthctl_el1nvvct), CCC(CGT_CPTR_TTA, check_cptr_tta), CCC(CGT_MDCR_HPMN, check_mdcr_hpmn), }; @@ -850,11 +895,15 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SYS_CNTHP_CVAL_EL2, CGT_HCR_NV), SR_RANGE_TRAP(SYS_CNTHV_TVAL_EL2, SYS_CNTHV_CVAL_EL2, CGT_HCR_NV), - /* All _EL02, _EL12 registers */ + /* All _EL02, _EL12 registers up to CNTKCTL_EL12 */ SR_RANGE_TRAP(sys_reg(3, 5, 0, 0, 0), sys_reg(3, 5, 10, 15, 7), CGT_HCR_NV), SR_RANGE_TRAP(sys_reg(3, 5, 12, 0, 0), - sys_reg(3, 5, 14, 15, 7), CGT_HCR_NV), + sys_reg(3, 5, 14, 1, 0), CGT_HCR_NV), + SR_TRAP(SYS_CNTP_CTL_EL02, CGT_CNTHCTL_EL1NVPCT), + SR_TRAP(SYS_CNTP_CVAL_EL02, CGT_CNTHCTL_EL1NVPCT), + SR_TRAP(SYS_CNTV_CTL_EL02, CGT_CNTHCTL_EL1NVVCT), + SR_TRAP(SYS_CNTV_CVAL_EL02, CGT_CNTHCTL_EL1NVVCT), SR_TRAP(OP_AT_S1E2R, CGT_HCR_NV), SR_TRAP(OP_AT_S1E2W, CGT_HCR_NV), SR_TRAP(OP_AT_S12E1R, CGT_HCR_NV), @@ -1184,6 +1233,11 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN), SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN), SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN), + SR_TRAP(SYS_CNTV_TVAL_EL0, CGT_CNTHCTL_EL1TVT), + SR_TRAP(SYS_CNTV_CVAL_EL0, CGT_CNTHCTL_EL1TVT), + SR_TRAP(SYS_CNTV_CTL_EL0, CGT_CNTHCTL_EL1TVT), + SR_TRAP(SYS_CNTVCT_EL0, CGT_CNTHCTL_EL1TVCT), + SR_TRAP(SYS_CNTVCTSS_EL0, CGT_CNTHCTL_EL1TVCT), SR_TRAP(SYS_FPMR, CGT_HCRX_EnFPM), /* * IMPDEF choice: @@ -2345,14 +2399,14 @@ inject: return true; } -static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit) +static bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg, u64 control_bit) { bool control_bit_set; if (!vcpu_has_nv(vcpu)) return false; - control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit; + control_bit_set = __vcpu_sys_reg(vcpu, reg) & control_bit; if (!is_hyp_ctxt(vcpu) && control_bit_set) { kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu)); return true; @@ -2360,9 +2414,24 @@ static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit) return false; } +static bool forward_hcr_traps(struct kvm_vcpu *vcpu, u64 control_bit) +{ + return __forward_traps(vcpu, HCR_EL2, control_bit); +} + bool forward_smc_trap(struct kvm_vcpu *vcpu) { - return forward_traps(vcpu, HCR_TSC); + return forward_hcr_traps(vcpu, HCR_TSC); +}
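The payoff of parameterizing __forward_traps() over the trap-control register shows up in the handle_exit.c hunk further down: a debug exception taken from vEL1 is reinjected into the guest hypervisor whenever it set MDCR_EL2.TDE and host userspace is not debugging::

    /* From kvm_handle_guest_debug(), later in this diff: */
    if (!vcpu->guest_debug && forward_debug_exception(vcpu))
            return 1;       /* reinjected into virtual EL2, guest resumes */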
+ +static bool forward_mdcr_traps(struct kvm_vcpu *vcpu, u64 control_bit) +{ + return __forward_traps(vcpu, MDCR_EL2, control_bit); +} + +bool forward_debug_exception(struct kvm_vcpu *vcpu) +{ + return forward_mdcr_traps(vcpu, MDCR_EL2_TDE); } static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr) @@ -2406,7 +2475,7 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu) * Forward this trap to the virtual EL2 if the virtual * HCR_EL2.NV bit is set and this is coming from !EL2. */ - if (forward_traps(vcpu, HCR_NV)) + if (forward_hcr_traps(vcpu, HCR_NV)) return; spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2); diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 98718bd65bf1..4d3d1a2eb157 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -65,14 +65,14 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) *host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state); *host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr); - vcpu_clear_flag(vcpu, HOST_SVE_ENABLED); + host_data_clear_flag(HOST_SVE_ENABLED); if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) - vcpu_set_flag(vcpu, HOST_SVE_ENABLED); + host_data_set_flag(HOST_SVE_ENABLED); if (system_supports_sme()) { - vcpu_clear_flag(vcpu, HOST_SME_ENABLED); + host_data_clear_flag(HOST_SME_ENABLED); if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN) - vcpu_set_flag(vcpu, HOST_SME_ENABLED); + host_data_set_flag(HOST_SME_ENABLED); /* * If PSTATE.SM is enabled then save any pending FP @@ -168,7 +168,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) */ if (has_vhe() && system_supports_sme()) { /* Also restore EL0 state seen on entry */ - if (vcpu_get_flag(vcpu, HOST_SME_ENABLED)) + if (host_data_test_flag(HOST_SME_ENABLED)) sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_SMEN); else sysreg_clear_set(CPACR_EL1, @@ -227,7 +227,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) * for EL0.
To avoid spurious traps, restore the trap state * seen by kvm_arch_vcpu_load_fp(): */ - if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED)) + if (host_data_test_flag(HOST_SVE_ENABLED)) sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN); else sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0); diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 12dad841f2a5..2196979a24a3 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -917,31 +917,24 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - int ret = 0; - trace_kvm_set_guest_debug(vcpu, dbg->control); - if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) { - ret = -EINVAL; - goto out; - } - - if (dbg->control & KVM_GUESTDBG_ENABLE) { - vcpu->guest_debug = dbg->control; - - /* Hardware assisted Break and Watch points */ - if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) { - vcpu->arch.external_debug_state = dbg->arch; - } + if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) + return -EINVAL; - } else { - /* If not enabled clear all flags */ + if (!(dbg->control & KVM_GUESTDBG_ENABLE)) { vcpu->guest_debug = 0; - vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING); + vcpu_clear_flag(vcpu, HOST_SS_ACTIVE_PENDING); + return 0; } -out: - return ret; + vcpu->guest_debug = dbg->control; + + /* Hardware assisted Break and Watch points */ + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) + vcpu->arch.external_debug_state = dbg->arch; + + return 0; } int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index d7c2990e7c9e..512d152233ff 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -183,6 +183,9 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu) struct kvm_run *run = vcpu->run; u64 esr = kvm_vcpu_get_esr(vcpu); + if (!vcpu->guest_debug && forward_debug_exception(vcpu)) + return 1; + run->exit_reason = KVM_EXIT_DEBUG; run->debug.arch.hsr = lower_32_bits(esr); run->debug.arch.hsr_high = upper_32_bits(esr); @@ -193,7 +196,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu) run->debug.arch.far = vcpu->arch.fault.far_el2; break; case ESR_ELx_EC_SOFTSTP_LOW: - vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING); + *vcpu_cpsr(vcpu) |= DBG_SPSR_SS; break; } diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h index d00093699aaf..502a5b73ee70 100644 --- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h @@ -88,15 +88,26 @@ default: write_debug(ptr[0], reg, 0); \ } +static struct kvm_guest_debug_arch *__vcpu_debug_regs(struct kvm_vcpu *vcpu) +{ + switch (vcpu->arch.debug_owner) { + case VCPU_DEBUG_FREE: + WARN_ON_ONCE(1); + fallthrough; + case VCPU_DEBUG_GUEST_OWNED: + return &vcpu->arch.vcpu_debug_state; + case VCPU_DEBUG_HOST_OWNED: + return &vcpu->arch.external_debug_state; + } + + return NULL; +} + static void __debug_save_state(struct kvm_guest_debug_arch *dbg, struct kvm_cpu_context *ctxt) { - u64 aa64dfr0; - int brps, wrps; - - aa64dfr0 = read_sysreg(id_aa64dfr0_el1); - brps = (aa64dfr0 >> 12) & 0xf; - wrps = (aa64dfr0 >> 20) & 0xf; + int brps = *host_data_ptr(debug_brps); + int wrps = *host_data_ptr(debug_wrps); save_debug(dbg->dbg_bcr, dbgbcr, brps); save_debug(dbg->dbg_bvr, dbgbvr, brps); @@ -109,13 +120,8 @@ static void __debug_save_state(struct kvm_guest_debug_arch *dbg, static void __debug_restore_state(struct kvm_guest_debug_arch *dbg, struct 
kvm_cpu_context *ctxt) { - u64 aa64dfr0; - int brps, wrps; - - aa64dfr0 = read_sysreg(id_aa64dfr0_el1); - - brps = (aa64dfr0 >> 12) & 0xf; - wrps = (aa64dfr0 >> 20) & 0xf; + int brps = *host_data_ptr(debug_brps); + int wrps = *host_data_ptr(debug_wrps); restore_debug(dbg->dbg_bcr, dbgbcr, brps); restore_debug(dbg->dbg_bvr, dbgbvr, brps); @@ -132,13 +138,13 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu) struct kvm_guest_debug_arch *host_dbg; struct kvm_guest_debug_arch *guest_dbg; - if (!vcpu_get_flag(vcpu, DEBUG_DIRTY)) + if (!kvm_debug_regs_in_use(vcpu)) return; host_ctxt = host_data_ptr(host_ctxt); guest_ctxt = &vcpu->arch.ctxt; host_dbg = host_data_ptr(host_debug_state.regs); - guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); + guest_dbg = __vcpu_debug_regs(vcpu); __debug_save_state(host_dbg, host_ctxt); __debug_restore_state(guest_dbg, guest_ctxt); @@ -151,18 +157,16 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu) struct kvm_guest_debug_arch *host_dbg; struct kvm_guest_debug_arch *guest_dbg; - if (!vcpu_get_flag(vcpu, DEBUG_DIRTY)) + if (!kvm_debug_regs_in_use(vcpu)) return; host_ctxt = host_data_ptr(host_ctxt); guest_ctxt = &vcpu->arch.ctxt; host_dbg = host_data_ptr(host_debug_state.regs); - guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); + guest_dbg = __vcpu_debug_regs(vcpu); __debug_save_state(guest_dbg, guest_ctxt); __debug_restore_state(host_dbg, host_ctxt); - - vcpu_clear_flag(vcpu, DEBUG_DIRTY); } #endif /* __ARM64_KVM_HYP_DEBUG_SR_H__ */ diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index abfa6ad92e91..f838a45665f2 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -501,7 +501,12 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu) return true; } -static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu) +static inline u64 compute_counter_value(struct arch_timer_context *ctxt) +{ + return arch_timer_read_cntpct_el0() - timer_get_offset(ctxt); +} + +static bool kvm_handle_cntxct(struct kvm_vcpu *vcpu) { struct arch_timer_context *ctxt; u32 sysreg; @@ -511,18 +516,19 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu) * We only get here for 64bit guests; 32bit guests will hit * the long and winding road all the way to the standard * handling. Yes, it sucks to be irrelevant. + * + * Also, we only deal with non-hypervisor context here (either + * an EL1 guest, or a non-HYP context of an EL2 guest).
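compute_counter_value() folds the old two-step subtraction (VM-wide offset, then per-vCPU offset) behind timer_get_offset(). A worked example of the trapped-read path, with illustrative numbers only::

    /* Say CNTPCT_EL0 reads 10000 and the context's offset is 2500: */
    val = compute_counter_value(ctxt);      /* 10000 - 2500 = 7500 */
    vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
    __kvm_skip_instr(vcpu);                 /* the guest observes 7500 */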
*/ + if (is_hyp_ctxt(vcpu)) + return false; + sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu)); switch (sysreg) { case SYS_CNTPCT_EL0: case SYS_CNTPCTSS_EL0: if (vcpu_has_nv(vcpu)) { - if (is_hyp_ctxt(vcpu)) { - ctxt = vcpu_hptimer(vcpu); - break; - } - /* Check for guest hypervisor trapping */ val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2); if (!vcpu_el2_e2h_is_set(vcpu)) @@ -534,16 +540,23 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu) ctxt = vcpu_ptimer(vcpu); break; + case SYS_CNTVCT_EL0: + case SYS_CNTVCTSS_EL0: + if (vcpu_has_nv(vcpu)) { + /* Check for guest hypervisor trapping */ + val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2); + + if (val & CNTHCTL_EL1TVCT) + return false; + } + + ctxt = vcpu_vtimer(vcpu); + break; default: return false; } - val = arch_timer_read_cntpct_el0(); - - if (ctxt->offset.vm_offset) - val -= *kern_hyp_va(ctxt->offset.vm_offset); - if (ctxt->offset.vcpu_offset) - val -= *kern_hyp_va(ctxt->offset.vcpu_offset); + val = compute_counter_value(ctxt); vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val); __kvm_skip_instr(vcpu); @@ -588,7 +601,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) __vgic_v3_perform_cpuif_access(vcpu) == 1) return true; - if (kvm_hyp_handle_cntpct(vcpu)) + if (kvm_handle_cntxct(vcpu)) return true; return false; diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index a651c43ad679..76ff095c6b6e 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -18,9 +18,34 @@ static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt); +static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt) +{ + struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu; + + if (!vcpu) + vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt); + + return vcpu; +} + +static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt) +{ + return host_data_ptr(host_ctxt) != ctxt; +} + +static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt) +{ + struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt); + + if (ctxt_is_guest(ctxt) && kvm_host_owns_debug_regs(vcpu)) + return &vcpu->arch.external_mdscr_el1; + + return &ctxt_sys_reg(ctxt, MDSCR_EL1); +} + static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt) { - ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1); + *ctxt_mdscr_el1(ctxt) = read_sysreg(mdscr_el1); // POR_EL0 can affect uaccess, so must be saved/restored early. if (ctxt_has_s1poe(ctxt)) @@ -33,16 +58,6 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt) ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0); } -static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt) -{ - struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu; - - if (!vcpu) - vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt); - - return vcpu; -} - static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt) { struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt); @@ -139,7 +154,7 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt) static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) { - write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1); + write_sysreg(*ctxt_mdscr_el1(ctxt), mdscr_el1); // POR_EL0 can affect uaccess, so must be saved/restored early. 
 if (ctxt_has_s1poe(ctxt)) @@ -283,7 +298,7 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu) __vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2); __vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2); - if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY)) + if (has_vhe() || kvm_debug_regs_in_use(vcpu)) __vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2); } @@ -300,7 +315,7 @@ static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu) write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2); write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2); - if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY)) + if (has_vhe() || kvm_debug_regs_in_use(vcpu)) write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2); } diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h deleted file mode 100644 index f957890c7e38..000000000000 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ /dev/null @@ -1,223 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2021 Google LLC - * Author: Fuad Tabba <tabba@google.com> - */ - -#ifndef __ARM64_KVM_FIXED_CONFIG_H__ -#define __ARM64_KVM_FIXED_CONFIG_H__ - -#include <asm/sysreg.h> - -/* - * This file contains definitions for features to be allowed or restricted for - * guest virtual machines, depending on the mode KVM is running in and on the - * type of guest that is running. - * - * The ALLOW masks represent a bitmask of feature fields that are allowed - * without any restrictions as long as they are supported by the system. - * - * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields for - * features that are restricted to support at most the specified feature. - * - * If a feature field is not present in either, then it is not supported. - * - * The approach taken for protected VMs is to allow features that are: - * - Needed by common Linux distributions (e.g., floating point) - * - Trivial to support, e.g., supporting the feature does not introduce or - * require tracking of additional state in KVM - * - Cannot be trapped or prevent the guest from using anyway - */ - -/* - * Allow for protected VMs: - * - Floating-point and Advanced SIMD - * - Data Independent Timing - * - Spectre/Meltdown Mitigation - */ -#define PVM_ID_AA64PFR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \ - ) - -/* - * Restrict to the following *unsigned* features for protected VMs: - * - AArch64 guests only (no support for AArch32 guests): - * AArch32 adds complexity in trap handling, emulation, condition codes, - * etc...
- * - RAS (v1) - * Supported by KVM - */ -#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\ - SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP) | \ - SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP) | \ - SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP) | \ - SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP) | \ - SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, RAS, IMP) \ - ) - -/* - * Allow for protected VMs: - * - Branch Target Identification - * - Speculative Store Bypassing - */ -#define PVM_ID_AA64PFR1_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \ - ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \ - ) - -#define PVM_ID_AA64PFR2_ALLOW 0ULL - -/* - * Allow for protected VMs: - * - Mixed-endian - * - Distinction between Secure and Non-secure Memory - * - Mixed-endian at EL0 only - * - Non-context synchronizing exception entry and exit - */ -#define PVM_ID_AA64MMFR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \ - ) - -/* - * Restrict to the following *unsigned* features for protected VMs: - * - 40-bit IPA - * - 16-bit ASID - */ -#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \ - ) - -/* - * Allow for protected VMs: - * - Hardware translation table updates to Access flag and Dirty state - * - Number of VMID bits from CPU - * - Hierarchical Permission Disables - * - Privileged Access Never - * - SError interrupt exceptions from speculative reads - * - Enhanced Translation Synchronization - * - Control for cache maintenance permission - */ -#define PVM_ID_AA64MMFR1_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_VMIDBits) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_CMOW) \ - ) - -/* - * Allow for protected VMs: - * - Common not Private translations - * - User Access Override - * - IESB bit in the SCTLR_ELx registers - * - Unaligned single-copy atomicity and atomic functions - * - ESR_ELx.EC value on an exception by read access to feature ID space - * - TTL field in address operations. - * - Break-before-make sequences when changing translation block size - * - E0PDx mechanism - */ -#define PVM_ID_AA64MMFR2_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CnP) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IDS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_TTL) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_BBM) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \ - ) - -#define PVM_ID_AA64MMFR3_ALLOW (0ULL) - -/* - * No support for Scalable Vectors for protected VMs: - * Requires additional support from KVM, e.g., context-switching and - * trapping at EL2 - */ -#define PVM_ID_AA64ZFR0_ALLOW (0ULL) - -/* - * No support for debug, including breakpoints, and watchpoints for protected - * VMs: - * The Arm architecture mandates support for at least the Armv8 debug - * architecture, which would include at least 2 hardware breakpoints and - * watchpoints. 
Providing that support to protected guests adds - * considerable state and complexity. Therefore, the reserved value of 0 is - * used for debug-related fields. - */ -#define PVM_ID_AA64DFR0_ALLOW (0ULL) -#define PVM_ID_AA64DFR1_ALLOW (0ULL) - -/* - * No support for implementation defined features. - */ -#define PVM_ID_AA64AFR0_ALLOW (0ULL) -#define PVM_ID_AA64AFR1_ALLOW (0ULL) - -/* - * No restrictions on instructions implemented in AArch64. - */ -#define PVM_ID_AA64ISAR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_AES) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA1) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA2) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_CRC32) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RDM) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA3) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM3) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM4) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_DP) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_FHM) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TS) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TLB) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RNDR) \ - ) - -/* Restrict pointer authentication to the basic version. */ -#define PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED (\ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), ID_AA64ISAR1_EL1_APA_PAuth) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), ID_AA64ISAR1_EL1_API_PAuth) \ - ) - -#define PVM_ID_AA64ISAR2_RESTRICT_UNSIGNED (\ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3), ID_AA64ISAR2_EL1_APA3_PAuth) \ - ) - -#define PVM_ID_AA64ISAR1_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DPB) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_JSCVT) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FCMA) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_LRCPC) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FRINTTS) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SB) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SPECRES) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_BF16) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DGH) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_I8MM) \ - ) - -#define PVM_ID_AA64ISAR2_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_ATS1A)| \ - ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \ - ) - -u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id); -bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code); -bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code); -int kvm_check_pvm_sysreg_table(void); - -#endif /* __ARM64_KVM_FIXED_CONFIG_H__ */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h index 97c527ef53c2..3766333bace9 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h +++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h @@ -7,7 +7,7 @@ #include <nvhe/memory.h> #include <nvhe/spinlock.h> -#define HYP_NO_ORDER USHRT_MAX +#define HYP_NO_ORDER ((u8)(~0)) struct hyp_pool { /* @@ -19,11 +19,11 @@ struct hyp_pool { struct list_head free_area[NR_PAGE_ORDERS]; phys_addr_t range_start; phys_addr_t range_end; - unsigned short max_order; + u8 max_order; }; /* Allocation */ -void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order); +void *hyp_alloc_pages(struct hyp_pool *pool, u8 order); void hyp_split_page(struct hyp_page *page); void hyp_get_page(struct hyp_pool *pool, void *addr); void hyp_put_page(struct hyp_pool *pool, void *addr); diff --git 
a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h index 0972faccc2af..978f38c386ee 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h @@ -11,40 +11,10 @@ #include <asm/kvm_mmu.h> #include <asm/kvm_pgtable.h> #include <asm/virt.h> +#include <nvhe/memory.h> #include <nvhe/pkvm.h> #include <nvhe/spinlock.h> -/* - * SW bits 0-1 are reserved to track the memory ownership state of each page: - * 00: The page is owned exclusively by the page-table owner. - * 01: The page is owned by the page-table owner, but is shared - * with another entity. - * 10: The page is shared with, but not owned by the page-table owner. - * 11: Reserved for future use (lending). - */ -enum pkvm_page_state { - PKVM_PAGE_OWNED = 0ULL, - PKVM_PAGE_SHARED_OWNED = KVM_PGTABLE_PROT_SW0, - PKVM_PAGE_SHARED_BORROWED = KVM_PGTABLE_PROT_SW1, - __PKVM_PAGE_RESERVED = KVM_PGTABLE_PROT_SW0 | - KVM_PGTABLE_PROT_SW1, - - /* Meta-states which aren't encoded directly in the PTE's SW bits */ - PKVM_NOPAGE, -}; - -#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1) -static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot, - enum pkvm_page_state state) -{ - return (prot & ~PKVM_PAGE_STATE_PROT_MASK) | state; -} - -static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot) -{ - return prot & PKVM_PAGE_STATE_PROT_MASK; -} - struct host_mmu { struct kvm_arch arch; struct kvm_pgtable pgt; @@ -69,6 +39,13 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages); int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages); int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages); int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages); +int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu, + enum kvm_pgtable_prot prot); +int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm); +int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot); +int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm); +int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm); +int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu); bool addr_is_memory(phys_addr_t phys); int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot); diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h index ab205c4d6774..34233d586060 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/memory.h +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h @@ -7,9 +7,47 @@ #include <linux/types.h> +/* + * Bits 0-1 are reserved to track the memory ownership state of each page: + * 00: The page is owned exclusively by the page-table owner. + * 01: The page is owned by the page-table owner, but is shared + * with another entity. + * 10: The page is shared with, but not owned by the page-table owner. + * 11: Reserved for future use (lending). 
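The replacement encoding in the hunk that follows keeps these two PTE software bits but accesses them through FIELD_PREP()/FIELD_GET(), which frees the enum to grow meta-states (such as PKVM_NOPAGE) that are never written to a PTE. A round-trip sketch, assuming PKVM_HOST_MEM_PROT is a valid base protection::

    enum kvm_pgtable_prot prot;

    prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
    WARN_ON(pkvm_getstate(prot) != PKVM_PAGE_SHARED_OWNED);

    /* Meta-states deliberately do not overlap the encodable SW bits: */
    WARN_ON(PKVM_NOPAGE & __PKVM_PAGE_RESERVED);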
+ */ +enum pkvm_page_state { + PKVM_PAGE_OWNED = 0ULL, + PKVM_PAGE_SHARED_OWNED = BIT(0), + PKVM_PAGE_SHARED_BORROWED = BIT(1), + __PKVM_PAGE_RESERVED = BIT(0) | BIT(1), + + /* Meta-states which aren't encoded directly in the PTE's SW bits */ + PKVM_NOPAGE = BIT(2), +}; +#define PKVM_PAGE_META_STATES_MASK (~__PKVM_PAGE_RESERVED) + +#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1) +static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot, + enum pkvm_page_state state) +{ + prot &= ~PKVM_PAGE_STATE_PROT_MASK; + prot |= FIELD_PREP(PKVM_PAGE_STATE_PROT_MASK, state); + return prot; +} + +static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot) +{ + return FIELD_GET(PKVM_PAGE_STATE_PROT_MASK, prot); +} + struct hyp_page { - unsigned short refcount; - unsigned short order; + u16 refcount; + u8 order; + + /* Host (non-meta) state. Guarded by the host stage-2 lock. */ + enum pkvm_page_state host_state : 8; + + u32 host_share_guest_count; }; extern u64 __hyp_vmemmap; @@ -29,7 +67,13 @@ static inline phys_addr_t hyp_virt_to_phys(void *addr) #define hyp_phys_to_pfn(phys) ((phys) >> PAGE_SHIFT) #define hyp_pfn_to_phys(pfn) ((phys_addr_t)((pfn) << PAGE_SHIFT)) -#define hyp_phys_to_page(phys) (&hyp_vmemmap[hyp_phys_to_pfn(phys)]) + +static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys) +{ + BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u64)); + return &hyp_vmemmap[hyp_phys_to_pfn(phys)]; +} + #define hyp_virt_to_page(virt) hyp_phys_to_page(__hyp_pa(virt)) #define hyp_virt_to_pfn(virt) hyp_phys_to_pfn(__hyp_pa(virt)) diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h index 24a9a8330d19..e42bf68c8848 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h +++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h @@ -20,6 +20,12 @@ struct pkvm_hyp_vcpu { /* Backpointer to the host's (untrusted) vCPU instance. */ struct kvm_vcpu *host_vcpu; + + /* + * If this hyp vCPU is loaded, then this is a backpointer to the + * per-cpu pointer tracking us. Otherwise, NULL if not loaded. 
+ */ + struct pkvm_hyp_vcpu **loaded_hyp_vcpu; }; /* @@ -47,6 +53,8 @@ struct pkvm_hyp_vm { struct pkvm_hyp_vcpu *vcpus[]; }; +extern hyp_spinlock_t vm_table_lock; + static inline struct pkvm_hyp_vm * pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu) { @@ -58,6 +66,11 @@ static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu) return vcpu_is_protected(&hyp_vcpu->vcpu); } +static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm) +{ + return kvm_vm_is_protected(&hyp_vm->kvm); +} + void pkvm_hyp_vm_table_init(void *tbl); int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva, @@ -69,5 +82,15 @@ int __pkvm_teardown_vm(pkvm_handle_t handle); struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle, unsigned int vcpu_idx); void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu); +struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void); + +struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle); +struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle); +void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm); + +bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code); +bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code); +void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu); +int kvm_check_pvm_sysreg_table(void); #endif /* __ARM64_KVM_NVHE_PKVM_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index 53efda0235cf..2f4a4f5036bb 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -51,42 +51,55 @@ static void __debug_restore_spe(u64 pmscr_el1) write_sysreg_el1(pmscr_el1, SYS_PMSCR); } -static void __debug_save_trace(u64 *trfcr_el1) +static void __trace_do_switch(u64 *saved_trfcr, u64 new_trfcr) { - *trfcr_el1 = 0; + *saved_trfcr = read_sysreg_el1(SYS_TRFCR); + write_sysreg_el1(new_trfcr, SYS_TRFCR); +} - /* Check if the TRBE is enabled */ - if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E)) - return; - /* - * Prohibit trace generation while we are in guest. - * Since access to TRFCR_EL1 is trapped, the guest can't - * modify the filtering set by the host. 
- */ - *trfcr_el1 = read_sysreg_el1(SYS_TRFCR); - write_sysreg_el1(0, SYS_TRFCR); - isb(); - /* Drain the trace buffer to memory */ - tsb_csync(); +static bool __trace_needs_drain(void) +{ + if (is_protected_kvm_enabled() && host_data_test_flag(HAS_TRBE)) + return read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E; + + return host_data_test_flag(TRBE_ENABLED); } -static void __debug_restore_trace(u64 trfcr_el1) +static bool __trace_needs_switch(void) { - if (!trfcr_el1) - return; + return host_data_test_flag(TRBE_ENABLED) || + host_data_test_flag(EL1_TRACING_CONFIGURED); +} - /* Restore trace filter controls */ - write_sysreg_el1(trfcr_el1, SYS_TRFCR); +static void __trace_switch_to_guest(void) +{ + /* Unsupported with TRBE so disable */ + if (host_data_test_flag(TRBE_ENABLED)) + *host_data_ptr(trfcr_while_in_guest) = 0; + + __trace_do_switch(host_data_ptr(host_debug_state.trfcr_el1), + *host_data_ptr(trfcr_while_in_guest)); + + if (__trace_needs_drain()) { + isb(); + tsb_csync(); + } +} + +static void __trace_switch_to_host(void) +{ + __trace_do_switch(host_data_ptr(trfcr_while_in_guest), + *host_data_ptr(host_debug_state.trfcr_el1)); } void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu) { /* Disable and flush SPE data generation */ - if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE)) + if (host_data_test_flag(HAS_SPE)) __debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1)); - /* Disable and flush Self-Hosted Trace generation */ - if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE)) - __debug_save_trace(host_data_ptr(host_debug_state.trfcr_el1)); + + if (__trace_needs_switch()) + __trace_switch_to_guest(); } void __debug_switch_to_guest(struct kvm_vcpu *vcpu) @@ -96,18 +109,13 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu) void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu) { - if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE)) + if (host_data_test_flag(HAS_SPE)) __debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1)); - if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE)) - __debug_restore_trace(*host_data_ptr(host_debug_state.trfcr_el1)); + if (__trace_needs_switch()) + __trace_switch_to_host(); } void __debug_switch_to_host(struct kvm_vcpu *vcpu) { __debug_switch_to_host_common(vcpu); } - -u64 __kvm_get_mdcr_el2(void) -{ - return read_sysreg(mdcr_el2); -} diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index 3d610fc51f4d..58f0cb2298cc 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -188,12 +188,12 @@ SYM_FUNC_END(__host_hvc) /* * Test whether the SP has overflowed, without corrupting a GPR. - * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit + * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit * of SP should always be 1. */ add sp, sp, x0 // sp' = sp + x0 sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp - tbz x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@ + tbz x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@ sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 6c90ef6736d6..5c134520e180 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -103,8 +103,6 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) /* Limit guest vector length to the maximum supported by the host. 
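[Annotation] Back on the debug-sr.c changes above, the rewritten trace handling reduces to a small decision table, derived from __trace_needs_switch(), __trace_needs_drain() and __trace_switch_to_guest()::

    host flags at guest entry      TRFCR while in guest          drain (isb + tsb csync)?
    TRBE_ENABLED                   0 (tracing prohibited)        yes
    EL1_TRACING_CONFIGURED only    trfcr_while_in_guest value    pKVM only, if TRBLIMITR_EL1.E is set
    neither                        left untouched (no switch)    no

On the way back, __trace_switch_to_host() swaps the host value back in and re-captures the guest-time value into trfcr_while_in_guest. Under pKVM the host-provided TRBE_ENABLED flag is not trusted, so TRBLIMITR_EL1.E is re-read from the register before draining.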
*/ hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl); - hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu; - hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2; hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE); hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) & @@ -112,8 +110,6 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags; - hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr); - hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2; hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3; @@ -141,16 +137,46 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i]; } +static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1); + DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2); + DECLARE_REG(u64, hcr_el2, host_ctxt, 3); + struct pkvm_hyp_vcpu *hyp_vcpu; + + if (!is_protected_kvm_enabled()) + return; + + hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx); + if (!hyp_vcpu) + return; + + if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) { + /* Propagate WFx trapping flags */ + hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI); + hyp_vcpu->vcpu.arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI); + } +} + +static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt) +{ + struct pkvm_hyp_vcpu *hyp_vcpu; + + if (!is_protected_kvm_enabled()) + return; + + hyp_vcpu = pkvm_get_loaded_hyp_vcpu(); + if (hyp_vcpu) + pkvm_put_hyp_vcpu(hyp_vcpu); +} + static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1); int ret; - host_vcpu = kern_hyp_va(host_vcpu); - if (unlikely(is_protected_kvm_enabled())) { - struct pkvm_hyp_vcpu *hyp_vcpu; - struct kvm *host_kvm; + struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_get_loaded_hyp_vcpu(); /* * KVM (and pKVM) doesn't support SME guests for now, and @@ -163,9 +189,6 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt) goto out; } - host_kvm = kern_hyp_va(host_vcpu->kvm); - hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle, - host_vcpu->vcpu_idx); if (!hyp_vcpu) { ret = -EINVAL; goto out; @@ -176,12 +199,141 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt) ret = __kvm_vcpu_run(&hyp_vcpu->vcpu); sync_hyp_vcpu(hyp_vcpu); - pkvm_put_hyp_vcpu(hyp_vcpu); } else { /* The host is fully trusted, run its vCPU directly. 
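[Annotation] The two new hypercalls are meant to bracket the host's vcpu_load()/vcpu_put() path, so __kvm_vcpu_run no longer performs a handle lookup on every guest entry. A hypothetical host-side pairing; the function names and placement here are illustrative, not the actual arm.c wiring::

    static void pkvm_host_vcpu_load(struct kvm_vcpu *vcpu)
    {
        /* hcr_el2 lets hyp pick up the host's WFx trapping policy */
        kvm_call_hyp_nvhe(__pkvm_vcpu_load, vcpu->kvm->arch.pkvm.handle,
                          vcpu->vcpu_idx, vcpu->arch.hcr_el2);
    }

    static void pkvm_host_vcpu_put(struct kvm_vcpu *vcpu)
    {
        kvm_call_hyp_nvhe(__pkvm_vcpu_put);
    }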
*/ - ret = __kvm_vcpu_run(host_vcpu); + ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu)); } +out: + cpu_reg(host_ctxt, 1) = ret; +} + +static int pkvm_refill_memcache(struct pkvm_hyp_vcpu *hyp_vcpu) +{ + struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu; + + return refill_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache, + host_vcpu->arch.pkvm_memcache.nr_pages, + &host_vcpu->arch.pkvm_memcache); +} + +static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(u64, pfn, host_ctxt, 1); + DECLARE_REG(u64, gfn, host_ctxt, 2); + DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3); + struct pkvm_hyp_vcpu *hyp_vcpu; + int ret = -EINVAL; + if (!is_protected_kvm_enabled()) + goto out; + + hyp_vcpu = pkvm_get_loaded_hyp_vcpu(); + if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu)) + goto out; + + ret = pkvm_refill_memcache(hyp_vcpu); + if (ret) + goto out; + + ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu, prot); +out: + cpu_reg(host_ctxt, 1) = ret; +} + +static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1); + DECLARE_REG(u64, gfn, host_ctxt, 2); + struct pkvm_hyp_vm *hyp_vm; + int ret = -EINVAL; + + if (!is_protected_kvm_enabled()) + goto out; + + hyp_vm = get_np_pkvm_hyp_vm(handle); + if (!hyp_vm) + goto out; + + ret = __pkvm_host_unshare_guest(gfn, hyp_vm); + put_pkvm_hyp_vm(hyp_vm); +out: + cpu_reg(host_ctxt, 1) = ret; +} + +static void handle___pkvm_host_relax_perms_guest(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(u64, gfn, host_ctxt, 1); + DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 2); + struct pkvm_hyp_vcpu *hyp_vcpu; + int ret = -EINVAL; + + if (!is_protected_kvm_enabled()) + goto out; + + hyp_vcpu = pkvm_get_loaded_hyp_vcpu(); + if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu)) + goto out; + + ret = __pkvm_host_relax_perms_guest(gfn, hyp_vcpu, prot); +out: + cpu_reg(host_ctxt, 1) = ret; +} + +static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1); + DECLARE_REG(u64, gfn, host_ctxt, 2); + struct pkvm_hyp_vm *hyp_vm; + int ret = -EINVAL; + + if (!is_protected_kvm_enabled()) + goto out; + + hyp_vm = get_np_pkvm_hyp_vm(handle); + if (!hyp_vm) + goto out; + + ret = __pkvm_host_wrprotect_guest(gfn, hyp_vm); + put_pkvm_hyp_vm(hyp_vm); +out: + cpu_reg(host_ctxt, 1) = ret; +} + +static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1); + DECLARE_REG(u64, gfn, host_ctxt, 2); + DECLARE_REG(bool, mkold, host_ctxt, 3); + struct pkvm_hyp_vm *hyp_vm; + int ret = -EINVAL; + + if (!is_protected_kvm_enabled()) + goto out; + + hyp_vm = get_np_pkvm_hyp_vm(handle); + if (!hyp_vm) + goto out; + + ret = __pkvm_host_test_clear_young_guest(gfn, mkold, hyp_vm); + put_pkvm_hyp_vm(hyp_vm); +out: + cpu_reg(host_ctxt, 1) = ret; +} + +static void handle___pkvm_host_mkyoung_guest(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(u64, gfn, host_ctxt, 1); + struct pkvm_hyp_vcpu *hyp_vcpu; + int ret = -EINVAL; + + if (!is_protected_kvm_enabled()) + goto out; + + hyp_vcpu = pkvm_get_loaded_hyp_vcpu(); + if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu)) + goto out; + + ret = __pkvm_host_mkyoung_guest(gfn, hyp_vcpu); out: cpu_reg(host_ctxt, 1) = ret; } @@ -233,6 +385,22 @@ static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt) __kvm_tlb_flush_vmid(kern_hyp_va(mmu)); } +static void 
handle___pkvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1); + struct pkvm_hyp_vm *hyp_vm; + + if (!is_protected_kvm_enabled()) + return; + + hyp_vm = get_np_pkvm_hyp_vm(handle); + if (!hyp_vm) + return; + + __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu); + put_pkvm_hyp_vm(hyp_vm); +} + static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1); @@ -264,11 +432,6 @@ static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt) __vgic_v3_init_lrs(); } -static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt) -{ - cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2(); -} - static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1); @@ -384,7 +547,6 @@ typedef void (*hcall_t)(struct kvm_cpu_context *); static const hcall_t host_hcall[] = { /* ___kvm_hyp_init */ - HANDLE_FUNC(__kvm_get_mdcr_el2), HANDLE_FUNC(__pkvm_init), HANDLE_FUNC(__pkvm_create_private_mapping), HANDLE_FUNC(__pkvm_cpu_set_vector), @@ -395,6 +557,12 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__pkvm_host_share_hyp), HANDLE_FUNC(__pkvm_host_unshare_hyp), + HANDLE_FUNC(__pkvm_host_share_guest), + HANDLE_FUNC(__pkvm_host_unshare_guest), + HANDLE_FUNC(__pkvm_host_relax_perms_guest), + HANDLE_FUNC(__pkvm_host_wrprotect_guest), + HANDLE_FUNC(__pkvm_host_test_clear_young_guest), + HANDLE_FUNC(__pkvm_host_mkyoung_guest), HANDLE_FUNC(__kvm_adjust_pc), HANDLE_FUNC(__kvm_vcpu_run), HANDLE_FUNC(__kvm_flush_vm_context), @@ -409,6 +577,9 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__pkvm_init_vm), HANDLE_FUNC(__pkvm_init_vcpu), HANDLE_FUNC(__pkvm_teardown_vm), + HANDLE_FUNC(__pkvm_vcpu_load), + HANDLE_FUNC(__pkvm_vcpu_put), + HANDLE_FUNC(__pkvm_tlb_flush_vmid), }; static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index e75374d682f4..7ad7b133b81a 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -201,8 +201,8 @@ static void *guest_s2_zalloc_page(void *mc) memset(addr, 0, PAGE_SIZE); p = hyp_virt_to_page(addr); - memset(p, 0, sizeof(*p)); p->refcount = 1; + p->order = 0; return addr; } @@ -268,6 +268,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd) void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc) { + struct hyp_page *page; void *addr; /* Dump all pgtable pages in the hyp_pool */ @@ -279,7 +280,9 @@ void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc) /* Drain the hyp_pool into the memcache */ addr = hyp_alloc_pages(&vm->pool, 0); while (addr) { - memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page)); + page = hyp_virt_to_page(addr); + page->refcount = 0; + page->order = 0; push_hyp_memcache(mc, addr, hyp_virt_to_phys); WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1)); addr = hyp_alloc_pages(&vm->pool, 0); @@ -382,19 +385,28 @@ bool addr_is_memory(phys_addr_t phys) return !!find_mem_range(phys, &range); } -static bool addr_is_allowed_memory(phys_addr_t phys) +static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range) +{ + return range->start <= addr && addr < range->end; +} + +static int check_range_allowed_memory(u64 start, u64 end) { struct memblock_region *reg; struct kvm_mem_range range; - reg = find_mem_range(phys, &range); + /* + * Callers can't 
check the state of a range that overlaps memory and + * MMIO regions, so ensure [start, end[ is in the same kvm_mem_range. + */ + reg = find_mem_range(start, &range); + if (!is_in_mem_range(end - 1, &range)) + return -EINVAL; - return reg && !(reg->flags & MEMBLOCK_NOMAP); -} + if (!reg || reg->flags & MEMBLOCK_NOMAP) + return -EPERM; -static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range) -{ - return range->start <= addr && addr < range->end; + return 0; } static bool range_is_memory(u64 start, u64 end) @@ -454,8 +466,10 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range) if (kvm_pte_valid(pte)) return -EAGAIN; - if (pte) + if (pte) { + WARN_ON(addr_is_memory(addr) && hyp_phys_to_page(addr)->host_state != PKVM_NOPAGE); return -EPERM; + } do { u64 granule = kvm_granule_size(level); @@ -477,10 +491,33 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size, return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot); } +static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state) +{ + phys_addr_t end = addr + size; + + for (; addr < end; addr += PAGE_SIZE) + hyp_phys_to_page(addr)->host_state = state; +} + int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id) { - return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt, - addr, size, &host_s2_pool, owner_id); + int ret; + + if (!addr_is_memory(addr)) + return -EPERM; + + ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt, + addr, size, &host_s2_pool, owner_id); + if (ret) + return ret; + + /* Don't forget to update the vmemmap tracking for the host */ + if (owner_id == PKVM_ID_HOST) + __host_update_page_state(addr, size, PKVM_PAGE_OWNED); + else + __host_update_page_state(addr, size, PKVM_NOPAGE); + + return 0; } static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot) @@ -546,39 +583,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt) BUG_ON(ret && ret != -EAGAIN); } -struct pkvm_mem_transition { - u64 nr_pages; - - struct { - enum pkvm_component_id id; - /* Address in the initiator's address space */ - u64 addr; - - union { - struct { - /* Address in the completer's address space */ - u64 completer_addr; - } host; - struct { - u64 completer_addr; - } hyp; - }; - } initiator; - - struct { - enum pkvm_component_id id; - } completer; -}; - -struct pkvm_mem_share { - const struct pkvm_mem_transition tx; - const enum kvm_pgtable_prot completer_prot; -}; - -struct pkvm_mem_donation { - const struct pkvm_mem_transition tx; -}; - struct check_walk_data { enum pkvm_page_state desired; enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr); @@ -604,115 +608,38 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, return kvm_pgtable_walk(pgt, addr, size, &walker); } -static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr) -{ - if (!addr_is_allowed_memory(addr)) - return PKVM_NOPAGE; - - if (!kvm_pte_valid(pte) && pte) - return PKVM_NOPAGE; - - return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)); -} - static int __host_check_page_state_range(u64 addr, u64 size, enum pkvm_page_state state) { - struct check_walk_data d = { - .desired = state, - .get_page_state = host_get_page_state, - }; + u64 end = addr + size; + int ret; + + ret = check_range_allowed_memory(addr, end); + if (ret) + return ret; hyp_assert_lock_held(&host_mmu.lock); - return check_page_state_range(&host_mmu.pgt, addr, size, &d); + for (; addr < end; addr += 
PAGE_SIZE) { + if (hyp_phys_to_page(addr)->host_state != state) + return -EPERM; + } + + return 0; } static int __host_set_page_state_range(u64 addr, u64 size, enum pkvm_page_state state) { - enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state); - - return host_stage2_idmap_locked(addr, size, prot); -} - -static int host_request_owned_transition(u64 *completer_addr, - const struct pkvm_mem_transition *tx) -{ - u64 size = tx->nr_pages * PAGE_SIZE; - u64 addr = tx->initiator.addr; - - *completer_addr = tx->initiator.host.completer_addr; - return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED); -} - -static int host_request_unshare(u64 *completer_addr, - const struct pkvm_mem_transition *tx) -{ - u64 size = tx->nr_pages * PAGE_SIZE; - u64 addr = tx->initiator.addr; - - *completer_addr = tx->initiator.host.completer_addr; - return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED); -} - -static int host_initiate_share(u64 *completer_addr, - const struct pkvm_mem_transition *tx) -{ - u64 size = tx->nr_pages * PAGE_SIZE; - u64 addr = tx->initiator.addr; - - *completer_addr = tx->initiator.host.completer_addr; - return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED); -} - -static int host_initiate_unshare(u64 *completer_addr, - const struct pkvm_mem_transition *tx) -{ - u64 size = tx->nr_pages * PAGE_SIZE; - u64 addr = tx->initiator.addr; - - *completer_addr = tx->initiator.host.completer_addr; - return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED); -} - -static int host_initiate_donation(u64 *completer_addr, - const struct pkvm_mem_transition *tx) -{ - u8 owner_id = tx->completer.id; - u64 size = tx->nr_pages * PAGE_SIZE; - - *completer_addr = tx->initiator.host.completer_addr; - return host_stage2_set_owner_locked(tx->initiator.addr, size, owner_id); -} - -static bool __host_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx) -{ - return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) || - tx->initiator.id != PKVM_ID_HYP); -} - -static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx, - enum pkvm_page_state state) -{ - u64 size = tx->nr_pages * PAGE_SIZE; - - if (__host_ack_skip_pgtable_check(tx)) - return 0; - - return __host_check_page_state_range(addr, size, state); -} + if (hyp_phys_to_page(addr)->host_state == PKVM_NOPAGE) { + int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT); -static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx) -{ - return __host_ack_transition(addr, tx, PKVM_NOPAGE); -} + if (ret) + return ret; + } -static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx) -{ - u64 size = tx->nr_pages * PAGE_SIZE; - u8 host_id = tx->completer.id; + __host_update_page_state(addr, size, state); - return host_stage2_set_owner_locked(addr, size, host_id); + return 0; } static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr) @@ -735,573 +662,418 @@ static int __hyp_check_page_state_range(u64 addr, u64 size, return check_page_state_range(&pkvm_pgtable, addr, size, &d); } -static int hyp_request_donation(u64 *completer_addr, - const struct pkvm_mem_transition *tx) +static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr) { - u64 size = tx->nr_pages * PAGE_SIZE; - u64 addr = tx->initiator.addr; + if (!kvm_pte_valid(pte)) + return PKVM_NOPAGE; - *completer_addr = tx->initiator.hyp.completer_addr; - return __hyp_check_page_state_range(addr, size, PKVM_PAGE_OWNED); + return 
pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)); } -static int hyp_initiate_donation(u64 *completer_addr, - const struct pkvm_mem_transition *tx) +static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr, + u64 size, enum pkvm_page_state state) { - u64 size = tx->nr_pages * PAGE_SIZE; - int ret; + struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu); + struct check_walk_data d = { + .desired = state, + .get_page_state = guest_get_page_state, + }; - *completer_addr = tx->initiator.hyp.completer_addr; - ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->initiator.addr, size); - return (ret != size) ? -EFAULT : 0; + hyp_assert_lock_held(&vm->lock); + return check_page_state_range(&vm->pgt, addr, size, &d); } -static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx) +int __pkvm_host_share_hyp(u64 pfn) { - return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) || - tx->initiator.id != PKVM_ID_HOST); -} + u64 phys = hyp_pfn_to_phys(pfn); + void *virt = __hyp_va(phys); + enum kvm_pgtable_prot prot; + u64 size = PAGE_SIZE; + int ret; -static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx, - enum kvm_pgtable_prot perms) -{ - u64 size = tx->nr_pages * PAGE_SIZE; + host_lock_component(); + hyp_lock_component(); - if (perms != PAGE_HYP) - return -EPERM; + ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED); + if (ret) + goto unlock; + if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) { + ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE); + if (ret) + goto unlock; + } - if (__hyp_ack_skip_pgtable_check(tx)) - return 0; + prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED); + WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot)); + WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED)); - return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE); +unlock: + hyp_unlock_component(); + host_unlock_component(); + + return ret; } -static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx) +int __pkvm_host_unshare_hyp(u64 pfn) { - u64 size = tx->nr_pages * PAGE_SIZE; + u64 phys = hyp_pfn_to_phys(pfn); + u64 virt = (u64)__hyp_va(phys); + u64 size = PAGE_SIZE; + int ret; - if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr)) - return -EBUSY; + host_lock_component(); + hyp_lock_component(); - return __hyp_check_page_state_range(addr, size, - PKVM_PAGE_SHARED_BORROWED); -} + ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED); + if (ret) + goto unlock; + ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED); + if (ret) + goto unlock; + if (hyp_page_count((void *)virt)) { + ret = -EBUSY; + goto unlock; + } -static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx) -{ - u64 size = tx->nr_pages * PAGE_SIZE; + WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size); + WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED)); - if (__hyp_ack_skip_pgtable_check(tx)) - return 0; +unlock: + hyp_unlock_component(); + host_unlock_component(); - return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE); + return ret; } -static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx, - enum kvm_pgtable_prot perms) +int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages) { - void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE); + u64 phys = hyp_pfn_to_phys(pfn); + u64 size = PAGE_SIZE * nr_pages; + void *virt = __hyp_va(phys); enum kvm_pgtable_prot prot; + int ret; - prot = pkvm_mkstate(perms, 
PKVM_PAGE_SHARED_BORROWED); - return pkvm_create_mappings_locked(start, end, prot); -} + host_lock_component(); + hyp_lock_component(); -static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx) -{ - u64 size = tx->nr_pages * PAGE_SIZE; - int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size); + ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED); + if (ret) + goto unlock; + if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) { + ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE); + if (ret) + goto unlock; + } - return (ret != size) ? -EFAULT : 0; -} + prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED); + WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot)); + WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP)); -static int hyp_complete_donation(u64 addr, - const struct pkvm_mem_transition *tx) -{ - void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE); - enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED); +unlock: + hyp_unlock_component(); + host_unlock_component(); - return pkvm_create_mappings_locked(start, end, prot); + return ret; } -static int check_share(struct pkvm_mem_share *share) +int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages) { - const struct pkvm_mem_transition *tx = &share->tx; - u64 completer_addr; + u64 phys = hyp_pfn_to_phys(pfn); + u64 size = PAGE_SIZE * nr_pages; + u64 virt = (u64)__hyp_va(phys); int ret; - switch (tx->initiator.id) { - case PKVM_ID_HOST: - ret = host_request_owned_transition(&completer_addr, tx); - break; - default: - ret = -EINVAL; - } + host_lock_component(); + hyp_lock_component(); + ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED); if (ret) - return ret; - - switch (tx->completer.id) { - case PKVM_ID_HYP: - ret = hyp_ack_share(completer_addr, tx, share->completer_prot); - break; - case PKVM_ID_FFA: - /* - * We only check the host; the secure side will check the other - * end when we forward the FFA call. - */ - ret = 0; - break; - default: - ret = -EINVAL; + goto unlock; + if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) { + ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE); + if (ret) + goto unlock; } + WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size); + WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST)); + +unlock: + hyp_unlock_component(); + host_unlock_component(); + return ret; } -static int __do_share(struct pkvm_mem_share *share) +int hyp_pin_shared_mem(void *from, void *to) { - const struct pkvm_mem_transition *tx = &share->tx; - u64 completer_addr; + u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE); + u64 end = PAGE_ALIGN((u64)to); + u64 size = end - start; int ret; - switch (tx->initiator.id) { - case PKVM_ID_HOST: - ret = host_initiate_share(&completer_addr, tx); - break; - default: - ret = -EINVAL; - } + host_lock_component(); + hyp_lock_component(); + ret = __host_check_page_state_range(__hyp_pa(start), size, + PKVM_PAGE_SHARED_OWNED); if (ret) - return ret; - - switch (tx->completer.id) { - case PKVM_ID_HYP: - ret = hyp_complete_share(completer_addr, tx, share->completer_prot); - break; - case PKVM_ID_FFA: - /* - * We're not responsible for any secure page-tables, so there's - * nothing to do here. 
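[Annotation] For review: the open-coded share/unshare/donate helpers above and below must still respect the state machine that the deleted do_share()/do_unshare()/do_donate() comment blocks (removed further down) documented:

    operation   initiator                   completer
    share       OWNED -> SHARED_OWNED       NOPAGE -> SHARED_BORROWED
    unshare     SHARED_OWNED -> OWNED       SHARED_BORROWED -> NOPAGE
    donate      OWNED -> NOPAGE             NOPAGE -> OWNED

The difference is bookkeeping: the host side of each transition is now tracked in the vmemmap (hyp_page::host_state) instead of being re-derived from host stage-2 PTEs on every check.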
- */ - ret = 0; - break; - default: - ret = -EINVAL; - } + goto unlock; - return ret; -} + ret = __hyp_check_page_state_range(start, size, + PKVM_PAGE_SHARED_BORROWED); + if (ret) + goto unlock; -/* - * do_share(): - * - * The page owner grants access to another component with a given set - * of permissions. - * - * Initiator: OWNED => SHARED_OWNED - * Completer: NOPAGE => SHARED_BORROWED - */ -static int do_share(struct pkvm_mem_share *share) -{ - int ret; + for (cur = start; cur < end; cur += PAGE_SIZE) + hyp_page_ref_inc(hyp_virt_to_page(cur)); - ret = check_share(share); - if (ret) - return ret; +unlock: + hyp_unlock_component(); + host_unlock_component(); - return WARN_ON(__do_share(share)); + return ret; } -static int check_unshare(struct pkvm_mem_share *share) +void hyp_unpin_shared_mem(void *from, void *to) { - const struct pkvm_mem_transition *tx = &share->tx; - u64 completer_addr; - int ret; - - switch (tx->initiator.id) { - case PKVM_ID_HOST: - ret = host_request_unshare(&completer_addr, tx); - break; - default: - ret = -EINVAL; - } + u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE); + u64 end = PAGE_ALIGN((u64)to); - if (ret) - return ret; + host_lock_component(); + hyp_lock_component(); - switch (tx->completer.id) { - case PKVM_ID_HYP: - ret = hyp_ack_unshare(completer_addr, tx); - break; - case PKVM_ID_FFA: - /* See check_share() */ - ret = 0; - break; - default: - ret = -EINVAL; - } + for (cur = start; cur < end; cur += PAGE_SIZE) + hyp_page_ref_dec(hyp_virt_to_page(cur)); - return ret; + hyp_unlock_component(); + host_unlock_component(); } -static int __do_unshare(struct pkvm_mem_share *share) +int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages) { - const struct pkvm_mem_transition *tx = &share->tx; - u64 completer_addr; + u64 phys = hyp_pfn_to_phys(pfn); + u64 size = PAGE_SIZE * nr_pages; int ret; - switch (tx->initiator.id) { - case PKVM_ID_HOST: - ret = host_initiate_unshare(&completer_addr, tx); - break; - default: - ret = -EINVAL; - } - - if (ret) - return ret; - - switch (tx->completer.id) { - case PKVM_ID_HYP: - ret = hyp_complete_unshare(completer_addr, tx); - break; - case PKVM_ID_FFA: - /* See __do_share() */ - ret = 0; - break; - default: - ret = -EINVAL; - } + host_lock_component(); + ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED); + if (!ret) + ret = __host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED); + host_unlock_component(); return ret; } -/* - * do_unshare(): - * - * The page owner revokes access from another component for a range of - * pages which were previously shared using do_share(). 
- * - * Initiator: SHARED_OWNED => OWNED - * Completer: SHARED_BORROWED => NOPAGE - */ -static int do_unshare(struct pkvm_mem_share *share) +int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages) { + u64 phys = hyp_pfn_to_phys(pfn); + u64 size = PAGE_SIZE * nr_pages; int ret; - ret = check_unshare(share); - if (ret) - return ret; + host_lock_component(); + ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED); + if (!ret) + ret = __host_set_page_state_range(phys, size, PKVM_PAGE_OWNED); + host_unlock_component(); - return WARN_ON(__do_unshare(share)); + return ret; } -static int check_donation(struct pkvm_mem_donation *donation) +int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu, + enum kvm_pgtable_prot prot) { - const struct pkvm_mem_transition *tx = &donation->tx; - u64 completer_addr; + struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu); + u64 phys = hyp_pfn_to_phys(pfn); + u64 ipa = hyp_pfn_to_phys(gfn); + struct hyp_page *page; int ret; - switch (tx->initiator.id) { - case PKVM_ID_HOST: - ret = host_request_owned_transition(&completer_addr, tx); - break; - case PKVM_ID_HYP: - ret = hyp_request_donation(&completer_addr, tx); - break; - default: - ret = -EINVAL; - } + if (prot & ~KVM_PGTABLE_PROT_RWX) + return -EINVAL; + ret = check_range_allowed_memory(phys, phys + PAGE_SIZE); if (ret) return ret; - switch (tx->completer.id) { - case PKVM_ID_HOST: - ret = host_ack_donation(completer_addr, tx); - break; - case PKVM_ID_HYP: - ret = hyp_ack_donation(completer_addr, tx); - break; - default: - ret = -EINVAL; - } - - return ret; -} + host_lock_component(); + guest_lock_component(vm); -static int __do_donate(struct pkvm_mem_donation *donation) -{ - const struct pkvm_mem_transition *tx = &donation->tx; - u64 completer_addr; - int ret; + ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE); + if (ret) + goto unlock; - switch (tx->initiator.id) { - case PKVM_ID_HOST: - ret = host_initiate_donation(&completer_addr, tx); - break; - case PKVM_ID_HYP: - ret = hyp_initiate_donation(&completer_addr, tx); + page = hyp_phys_to_page(phys); + switch (page->host_state) { + case PKVM_PAGE_OWNED: + WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED)); break; + case PKVM_PAGE_SHARED_OWNED: + if (page->host_share_guest_count) + break; + /* Only host to np-guest multi-sharing is tolerated */ + WARN_ON(1); + fallthrough; default: - ret = -EINVAL; + ret = -EPERM; + goto unlock; } - if (ret) - return ret; + WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys, + pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED), + &vcpu->vcpu.arch.pkvm_memcache, 0)); + page->host_share_guest_count++; - switch (tx->completer.id) { - case PKVM_ID_HOST: - ret = host_complete_donation(completer_addr, tx); - break; - case PKVM_ID_HYP: - ret = hyp_complete_donation(completer_addr, tx); - break; - default: - ret = -EINVAL; - } +unlock: + guest_unlock_component(vm); + host_unlock_component(); return ret; } -/* - * do_donate(): - * - * The page owner transfers ownership to another component, losing access - * as a consequence. 
- * - * Initiator: OWNED => NOPAGE - * Completer: NOPAGE => OWNED - */ -static int do_donate(struct pkvm_mem_donation *donation) +static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa) { + enum pkvm_page_state state; + struct hyp_page *page; + kvm_pte_t pte; + u64 phys; + s8 level; int ret; - ret = check_donation(donation); + ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level); if (ret) return ret; + if (level != KVM_PGTABLE_LAST_LEVEL) + return -E2BIG; + if (!kvm_pte_valid(pte)) + return -ENOENT; - return WARN_ON(__do_donate(donation)); -} - -int __pkvm_host_share_hyp(u64 pfn) -{ - int ret; - u64 host_addr = hyp_pfn_to_phys(pfn); - u64 hyp_addr = (u64)__hyp_va(host_addr); - struct pkvm_mem_share share = { - .tx = { - .nr_pages = 1, - .initiator = { - .id = PKVM_ID_HOST, - .addr = host_addr, - .host = { - .completer_addr = hyp_addr, - }, - }, - .completer = { - .id = PKVM_ID_HYP, - }, - }, - .completer_prot = PAGE_HYP, - }; + state = guest_get_page_state(pte, ipa); + if (state != PKVM_PAGE_SHARED_BORROWED) + return -EPERM; - host_lock_component(); - hyp_lock_component(); + phys = kvm_pte_to_phys(pte); + ret = check_range_allowed_memory(phys, phys + PAGE_SIZE); + if (WARN_ON(ret)) + return ret; - ret = do_share(&share); + page = hyp_phys_to_page(phys); + if (page->host_state != PKVM_PAGE_SHARED_OWNED) + return -EPERM; + if (WARN_ON(!page->host_share_guest_count)) + return -EINVAL; - hyp_unlock_component(); - host_unlock_component(); + *__phys = phys; - return ret; + return 0; } -int __pkvm_host_unshare_hyp(u64 pfn) +int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *vm) { + u64 ipa = hyp_pfn_to_phys(gfn); + struct hyp_page *page; + u64 phys; int ret; - u64 host_addr = hyp_pfn_to_phys(pfn); - u64 hyp_addr = (u64)__hyp_va(host_addr); - struct pkvm_mem_share share = { - .tx = { - .nr_pages = 1, - .initiator = { - .id = PKVM_ID_HOST, - .addr = host_addr, - .host = { - .completer_addr = hyp_addr, - }, - }, - .completer = { - .id = PKVM_ID_HYP, - }, - }, - .completer_prot = PAGE_HYP, - }; host_lock_component(); - hyp_lock_component(); + guest_lock_component(vm); + + ret = __check_host_shared_guest(vm, &phys, ipa); + if (ret) + goto unlock; + + ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE); + if (ret) + goto unlock; - ret = do_unshare(&share); + page = hyp_phys_to_page(phys); + page->host_share_guest_count--; + if (!page->host_share_guest_count) + WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_OWNED)); - hyp_unlock_component(); +unlock: + guest_unlock_component(vm); host_unlock_component(); return ret; } -int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages) +int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot) { + struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu); + u64 ipa = hyp_pfn_to_phys(gfn); + u64 phys; int ret; - u64 host_addr = hyp_pfn_to_phys(pfn); - u64 hyp_addr = (u64)__hyp_va(host_addr); - struct pkvm_mem_donation donation = { - .tx = { - .nr_pages = nr_pages, - .initiator = { - .id = PKVM_ID_HOST, - .addr = host_addr, - .host = { - .completer_addr = hyp_addr, - }, - }, - .completer = { - .id = PKVM_ID_HYP, - }, - }, - }; + + if (prot & ~KVM_PGTABLE_PROT_RWX) + return -EINVAL; host_lock_component(); - hyp_lock_component(); + guest_lock_component(vm); - ret = do_donate(&donation); + ret = __check_host_shared_guest(vm, &phys, ipa); + if (!ret) + ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0); - hyp_unlock_component(); + guest_unlock_component(vm); 
host_unlock_component(); return ret; } -int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages) +int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *vm) { + u64 ipa = hyp_pfn_to_phys(gfn); + u64 phys; int ret; - u64 host_addr = hyp_pfn_to_phys(pfn); - u64 hyp_addr = (u64)__hyp_va(host_addr); - struct pkvm_mem_donation donation = { - .tx = { - .nr_pages = nr_pages, - .initiator = { - .id = PKVM_ID_HYP, - .addr = hyp_addr, - .hyp = { - .completer_addr = host_addr, - }, - }, - .completer = { - .id = PKVM_ID_HOST, - }, - }, - }; host_lock_component(); - hyp_lock_component(); + guest_lock_component(vm); - ret = do_donate(&donation); + ret = __check_host_shared_guest(vm, &phys, ipa); + if (!ret) + ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE); - hyp_unlock_component(); + guest_unlock_component(vm); host_unlock_component(); return ret; } -int hyp_pin_shared_mem(void *from, void *to) +int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm) { - u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE); - u64 end = PAGE_ALIGN((u64)to); - u64 size = end - start; + u64 ipa = hyp_pfn_to_phys(gfn); + u64 phys; int ret; host_lock_component(); - hyp_lock_component(); - - ret = __host_check_page_state_range(__hyp_pa(start), size, - PKVM_PAGE_SHARED_OWNED); - if (ret) - goto unlock; - - ret = __hyp_check_page_state_range(start, size, - PKVM_PAGE_SHARED_BORROWED); - if (ret) - goto unlock; + guest_lock_component(vm); - for (cur = start; cur < end; cur += PAGE_SIZE) - hyp_page_ref_inc(hyp_virt_to_page(cur)); + ret = __check_host_shared_guest(vm, &phys, ipa); + if (!ret) + ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold); -unlock: - hyp_unlock_component(); + guest_unlock_component(vm); host_unlock_component(); return ret; } -void hyp_unpin_shared_mem(void *from, void *to) -{ - u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE); - u64 end = PAGE_ALIGN((u64)to); - - host_lock_component(); - hyp_lock_component(); - - for (cur = start; cur < end; cur += PAGE_SIZE) - hyp_page_ref_dec(hyp_virt_to_page(cur)); - - hyp_unlock_component(); - host_unlock_component(); -} - -int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages) +int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu) { + struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu); + u64 ipa = hyp_pfn_to_phys(gfn); + u64 phys; int ret; - struct pkvm_mem_share share = { - .tx = { - .nr_pages = nr_pages, - .initiator = { - .id = PKVM_ID_HOST, - .addr = hyp_pfn_to_phys(pfn), - }, - .completer = { - .id = PKVM_ID_FFA, - }, - }, - }; host_lock_component(); - ret = do_share(&share); - host_unlock_component(); + guest_lock_component(vm); - return ret; -} + ret = __check_host_shared_guest(vm, &phys, ipa); + if (!ret) + kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0); -int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages) -{ - int ret; - struct pkvm_mem_share share = { - .tx = { - .nr_pages = nr_pages, - .initiator = { - .id = PKVM_ID_HOST, - .addr = hyp_pfn_to_phys(pfn), - }, - .completer = { - .id = PKVM_ID_FFA, - }, - }, - }; - - host_lock_component(); - ret = do_unshare(&share); + guest_unlock_component(vm); host_unlock_component(); return ret; diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c index 8850b591d775..f41c7440b34b 100644 --- a/arch/arm64/kvm/hyp/nvhe/mm.c +++ b/arch/arm64/kvm/hyp/nvhe/mm.c @@ -360,10 +360,10 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr) prev_base = __io_map_base; /* - * Efficient stack verification using the PAGE_SHIFT bit implies + * 
Efficient stack verification using the NVHE_STACK_SHIFT bit implies * an alignment of our allocation on the order of the size. */ - size = PAGE_SIZE * 2; + size = NVHE_STACK_SIZE * 2; addr = ALIGN(__io_map_base, size); ret = __pkvm_alloc_private_va_range(addr, size); @@ -373,12 +373,12 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr) * at the higher address and leave the lower guard page * unbacked. * - * Any valid stack address now has the PAGE_SHIFT bit as 1 + * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1 * and addresses corresponding to the guard page have the - * PAGE_SHIFT bit as 0 - this is used for overflow detection. + * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection. */ - ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE, - PAGE_SIZE, phys, PAGE_HYP); + ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + NVHE_STACK_SIZE, + NVHE_STACK_SIZE, phys, PAGE_HYP); if (ret) __io_map_base = prev_base; } diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c index e691290d3765..a1eb27a1a747 100644 --- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c +++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c @@ -32,7 +32,7 @@ u64 __hyp_vmemmap; */ static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool, struct hyp_page *p, - unsigned short order) + u8 order) { phys_addr_t addr = hyp_page_to_phys(p); @@ -51,7 +51,7 @@ static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool, /* Find a buddy page currently available for allocation */ static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool, struct hyp_page *p, - unsigned short order) + u8 order) { struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order); @@ -94,7 +94,7 @@ static void __hyp_attach_page(struct hyp_pool *pool, struct hyp_page *p) { phys_addr_t phys = hyp_page_to_phys(p); - unsigned short order = p->order; + u8 order = p->order; struct hyp_page *buddy; memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order); @@ -129,7 +129,7 @@ insert: static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool, struct hyp_page *p, - unsigned short order) + u8 order) { struct hyp_page *buddy; @@ -183,7 +183,7 @@ void hyp_get_page(struct hyp_pool *pool, void *addr) void hyp_split_page(struct hyp_page *p) { - unsigned short order = p->order; + u8 order = p->order; unsigned int i; p->order = 0; @@ -195,10 +195,10 @@ void hyp_split_page(struct hyp_page *p) } } -void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order) +void *hyp_alloc_pages(struct hyp_pool *pool, u8 order) { - unsigned short i = order; struct hyp_page *p; + u8 i = order; hyp_spin_lock(&pool->lock); diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 73e319891327..3927fe52a3dd 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -9,7 +9,6 @@ #include <asm/kvm_emulate.h> -#include <nvhe/fixed_config.h> #include <nvhe/mem_protect.h> #include <nvhe/memory.h> #include <nvhe/pkvm.h> @@ -24,232 +23,160 @@ unsigned int kvm_arm_vmid_bits; unsigned int kvm_host_sve_max_vl; /* - * Set trap register values based on features in ID_AA64PFR0. + * The currently loaded hyp vCPU for each physical CPU. Used only when + * protected KVM is enabled, but for both protected and non-protected VMs. 
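[Annotation] Restating the guard-page invariant that pkvm_create_stack() (above) sets up and host.S tests with tbz: the private VA range is aligned to 2 * NVHE_STACK_SIZE and only its upper half is backed. An illustrative predicate, assuming NVHE_STACK_SIZE == 1UL << NVHE_STACK_SHIFT; not part of the patch::

    /*
     * Bit NVHE_STACK_SHIFT is 1 for every byte of the backed stack
     * half and 0 for every byte of the unbacked guard half, so the
     * overflow check in host.S reduces to a single bit test on SP.
     */
    static inline bool hyp_sp_in_guard_page(unsigned long sp)
    {
        return !(sp & BIT(NVHE_STACK_SHIFT));
    }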
*/ -static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) -{ - const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1); - u64 hcr_set = HCR_RW; - u64 hcr_clear = 0; - u64 cptr_set = 0; - u64 cptr_clear = 0; - - /* Protected KVM does not support AArch32 guests. */ - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL0_IMP); - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL1_IMP); +static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu); - /* - * Linux guests assume support for floating-point and Advanced SIMD. Do - * not change the trapping behavior for these from the KVM default. - */ - BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP), - PVM_ID_AA64PFR0_ALLOW)); - BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD), - PVM_ID_AA64PFR0_ALLOW)); +static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; if (has_hvhe()) - hcr_set |= HCR_E2H; + vcpu->arch.hcr_el2 |= HCR_E2H; - /* Trap RAS unless all current versions are supported */ - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) < - ID_AA64PFR0_EL1_RAS_V1P1) { - hcr_set |= HCR_TERR | HCR_TEA; - hcr_clear |= HCR_FIEN; + if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) { + /* route synchronous external abort exceptions to EL2 */ + vcpu->arch.hcr_el2 |= HCR_TEA; + /* trap error record accesses */ + vcpu->arch.hcr_el2 |= HCR_TERR; } - /* Trap AMU */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) { - hcr_clear |= HCR_AMVOFFEN; - cptr_set |= CPTR_EL2_TAM; - } + if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) + vcpu->arch.hcr_el2 |= HCR_FWB; - /* Trap SVE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) { - if (has_hvhe()) - cptr_clear |= CPACR_EL1_ZEN; - else - cptr_set |= CPTR_EL2_TZ; - } + if (cpus_have_final_cap(ARM64_HAS_EVT) && + !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE)) + vcpu->arch.hcr_el2 |= HCR_TID4; + else + vcpu->arch.hcr_el2 |= HCR_TID2; - vcpu->arch.hcr_el2 |= hcr_set; - vcpu->arch.hcr_el2 &= ~hcr_clear; - vcpu->arch.cptr_el2 |= cptr_set; - vcpu->arch.cptr_el2 &= ~cptr_clear; + if (vcpu_has_ptrauth(vcpu)) + vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); + + if (kvm_has_mte(vcpu->kvm)) + vcpu->arch.hcr_el2 |= HCR_ATA; } -/* - * Set trap register values based on features in ID_AA64PFR1. - */ -static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu) +static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu) { - const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1); - u64 hcr_set = 0; - u64 hcr_clear = 0; + struct kvm *kvm = vcpu->kvm; + u64 val = vcpu->arch.hcr_el2; - /* Memory Tagging: Trap and Treat as Untagged if not supported. */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) { - hcr_set |= HCR_TID5; - hcr_clear |= HCR_DCT | HCR_ATA; + /* No support for AArch32. */ + val |= HCR_RW; + + /* + * Always trap: + * - Feature id registers: to control features exposed to guests + * - Implementation-defined features + */ + val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1; + + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) { + val |= HCR_TERR | HCR_TEA; + val &= ~(HCR_FIEN); } - vcpu->arch.hcr_el2 |= hcr_set; - vcpu->arch.hcr_el2 &= ~hcr_clear; -} + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP)) + val &= ~(HCR_AMVOFFEN); -/* - * Set trap register values based on features in ID_AA64DFR0. 
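[Annotation] The recurring change in these trap helpers: feature tests move from FIELD_GET(ARM64_FEATURE_MASK(...), pvm_read_id_reg(...)) on the pvm fixed-config values to kvm_has_feat() on the VM's sanitised ID-register view, e.g. this shape taken from pvm_init_traps_hcr() above::

    /* Trap RAS unless the guest's ID registers expose FEAT_RAS */
    if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
        val |= HCR_TERR | HCR_TEA;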
- */ -static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) -{ - const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1); - u64 mdcr_set = 0; - u64 mdcr_clear = 0; - u64 cptr_set = 0; - - /* Trap/constrain PMU */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) { - mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR; - mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME | - MDCR_EL2_HPMN_MASK; + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) { + val |= HCR_TID5; + val &= ~(HCR_DCT | HCR_ATA); } - /* Trap Debug */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids)) - mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE; + if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP)) + val |= HCR_TLOR; - /* Trap OS Double Lock */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids)) - mdcr_set |= MDCR_EL2_TDOSA; + vcpu->arch.hcr_el2 = val; +} + +static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + u64 val = vcpu->arch.mdcr_el2; - /* Trap SPE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) { - mdcr_set |= MDCR_EL2_TPMS; - mdcr_clear |= MDCR_EL2_E2PB_MASK; + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) { + val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR; + val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK); } - /* Trap Trace Filter */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids)) - mdcr_set |= MDCR_EL2_TTRF; + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP)) + val |= MDCR_EL2_TDRA | MDCR_EL2_TDA; - /* Trap Trace */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) { - if (has_hvhe()) - cptr_set |= CPACR_EL1_TTA; - else - cptr_set |= CPTR_EL2_TTA; - } + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP)) + val |= MDCR_EL2_TDOSA; - /* Trap External Trace */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids)) - mdcr_clear |= MDCR_EL2_E2TB_MASK; + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) { + val |= MDCR_EL2_TPMS; + val &= ~MDCR_EL2_E2PB_MASK; + } - vcpu->arch.mdcr_el2 |= mdcr_set; - vcpu->arch.mdcr_el2 &= ~mdcr_clear; - vcpu->arch.cptr_el2 |= cptr_set; -} + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP)) + val |= MDCR_EL2_TTRF; -/* - * Set trap register values based on features in ID_AA64MMFR0. - */ -static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu) -{ - const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1); - u64 mdcr_set = 0; + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, ExtTrcBuff, IMP)) + val |= MDCR_EL2_E2TB_MASK; /* Trap Debug Communications Channel registers */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids)) - mdcr_set |= MDCR_EL2_TDCC; + if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP)) + val |= MDCR_EL2_TDCC; - vcpu->arch.mdcr_el2 |= mdcr_set; + vcpu->arch.mdcr_el2 = val; } /* - * Set trap register values based on features in ID_AA64MMFR1. + * Check that cpu features that are neither trapped nor supported are not + * enabled for protected VMs. 
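[Annotation] pkvm_vcpu_init_traps() (further down) now returns an error rather than silently adding traps, so a protected vCPU whose advertised features fail these checks is rejected at creation time. The caller-side effect, mirroring init_pkvm_hyp_vcpu() later in this diff::

    ret = pkvm_vcpu_init_traps(hyp_vcpu);
    if (ret)
        goto done;    /* creation fails; the host vCPU gets unpinned */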
*/ -static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu) +static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu) { - const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1); - u64 hcr_set = 0; - - /* Trap LOR */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids)) - hcr_set |= HCR_TLOR; + struct kvm *kvm = vcpu->kvm; - vcpu->arch.hcr_el2 |= hcr_set; -} - -/* - * Set baseline trap register values. - */ -static void pvm_init_trap_regs(struct kvm_vcpu *vcpu) -{ - const u64 hcr_trap_feat_regs = HCR_TID3; - const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1; + /* Protected KVM does not support AArch32 guests. */ + if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) || + kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32)) + return -EINVAL; /* - * Always trap: - * - Feature id registers: to control features exposed to guests - * - Implementation-defined features + * Linux guests assume support for floating-point and Advanced SIMD. Do + * not change the trapping behavior for these from the KVM default. */ - vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef; - - /* Clear res0 and set res1 bits to trap potential new features. */ - vcpu->arch.hcr_el2 &= ~(HCR_RES0); - vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0); - if (!has_hvhe()) { - vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1; - vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0); - } -} - -static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu) -{ - vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; - - if (has_hvhe()) - vcpu->arch.hcr_el2 |= HCR_E2H; - - if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) { - /* route synchronous external abort exceptions to EL2 */ - vcpu->arch.hcr_el2 |= HCR_TEA; - /* trap error record accesses */ - vcpu->arch.hcr_el2 |= HCR_TERR; - } - - if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) - vcpu->arch.hcr_el2 |= HCR_FWB; + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) || + !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP)) + return -EINVAL; - if (cpus_have_final_cap(ARM64_HAS_EVT) && - !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE)) - vcpu->arch.hcr_el2 |= HCR_TID4; - else - vcpu->arch.hcr_el2 |= HCR_TID2; + /* No SME support in KVM right now. Check to catch if it changes. */ + if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP)) + return -EINVAL; - if (vcpu_has_ptrauth(vcpu)) - vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); + return 0; } /* * Initialize trap register values in protected mode. */ -static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu) +static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu) { - vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu); + struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu; + int ret; + vcpu->arch.mdcr_el2 = 0; pkvm_vcpu_reset_hcr(vcpu); - if ((!vcpu_is_protected(vcpu))) - return; + if ((!pkvm_hyp_vcpu_is_protected(hyp_vcpu))) + return 0; + + ret = pkvm_check_pvm_cpu_features(vcpu); + if (ret) + return ret; + + pvm_init_traps_hcr(vcpu); + pvm_init_traps_mdcr(vcpu); - pvm_init_trap_regs(vcpu); - pvm_init_traps_aa64pfr0(vcpu); - pvm_init_traps_aa64pfr1(vcpu); - pvm_init_traps_aa64dfr0(vcpu); - pvm_init_traps_aa64mmfr0(vcpu); - pvm_init_traps_aa64mmfr1(vcpu); + return 0; } /* @@ -270,10 +197,10 @@ static pkvm_handle_t idx_to_vm_handle(unsigned int idx) /* * Spinlock for protecting state related to the VM table. Protects writes - * to 'vm_table' and 'nr_table_entries' as well as reads and writes to - * 'last_hyp_vcpu_lookup'. + * to 'vm_table', 'nr_table_entries', and other per-vm state on initialization. 
+ * Also protects reads and writes to 'last_hyp_vcpu_lookup'. */ -static DEFINE_HYP_SPINLOCK(vm_table_lock); +DEFINE_HYP_SPINLOCK(vm_table_lock); /* * The table of VM entries for protected VMs in hyp. @@ -306,15 +233,30 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle, struct pkvm_hyp_vcpu *hyp_vcpu = NULL; struct pkvm_hyp_vm *hyp_vm; + /* Cannot load a new vcpu without putting the old one first. */ + if (__this_cpu_read(loaded_hyp_vcpu)) + return NULL; + hyp_spin_lock(&vm_table_lock); hyp_vm = get_vm_by_handle(handle); if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx) goto unlock; hyp_vcpu = hyp_vm->vcpus[vcpu_idx]; + + /* Ensure vcpu isn't loaded on more than one cpu simultaneously. */ + if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) { + hyp_vcpu = NULL; + goto unlock; + } + + hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu); hyp_page_ref_inc(hyp_virt_to_page(hyp_vm)); unlock: hyp_spin_unlock(&vm_table_lock); + + if (hyp_vcpu) + __this_cpu_write(loaded_hyp_vcpu, hyp_vcpu); return hyp_vcpu; } @@ -323,17 +265,63 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu); hyp_spin_lock(&vm_table_lock); + hyp_vcpu->loaded_hyp_vcpu = NULL; + __this_cpu_write(loaded_hyp_vcpu, NULL); + hyp_page_ref_dec(hyp_virt_to_page(hyp_vm)); + hyp_spin_unlock(&vm_table_lock); +} + +struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void) +{ + return __this_cpu_read(loaded_hyp_vcpu); + +} + +struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle) +{ + struct pkvm_hyp_vm *hyp_vm; + + hyp_spin_lock(&vm_table_lock); + hyp_vm = get_vm_by_handle(handle); + if (hyp_vm) + hyp_page_ref_inc(hyp_virt_to_page(hyp_vm)); + hyp_spin_unlock(&vm_table_lock); + + return hyp_vm; +} + +void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm) +{ + hyp_spin_lock(&vm_table_lock); hyp_page_ref_dec(hyp_virt_to_page(hyp_vm)); hyp_spin_unlock(&vm_table_lock); } +struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle) +{ + struct pkvm_hyp_vm *hyp_vm = get_pkvm_hyp_vm(handle); + + if (hyp_vm && pkvm_hyp_vm_is_protected(hyp_vm)) { + put_pkvm_hyp_vm(hyp_vm); + hyp_vm = NULL; + } + + return hyp_vm; +} + static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm) { struct kvm *kvm = &hyp_vm->kvm; + unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags); DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES); + if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags)) + set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags); + /* No restrictions for non-protected VMs. 
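[Annotation] The get_pkvm_hyp_vm()/put_pkvm_hyp_vm() helpers added above pair the handle lookup with a reference count on the VM's vmemmap page, which is what keeps the VM alive across the hypercall handlers seen earlier in this diff. The typical caller shape, sketched after handle___pkvm_host_wrprotect_guest()::

    struct pkvm_hyp_vm *hyp_vm = get_np_pkvm_hyp_vm(handle);
    int ret = -EINVAL;

    if (hyp_vm) {
        /* the VM cannot be torn down while this reference is held */
        ret = __pkvm_host_wrprotect_guest(gfn, hyp_vm);
        put_pkvm_hyp_vm(hyp_vm);
    }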
*/ if (!kvm_vm_is_protected(kvm)) { + hyp_vm->kvm.arch.flags = host_arch_flags; + bitmap_copy(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES); @@ -342,50 +330,26 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES); - /* - * For protected VMs, always allow: - * - CPU starting in poweroff state - * - PSCI v0.2 - */ - set_bit(KVM_ARM_VCPU_POWER_OFF, allowed_features); set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features); - /* - * Check if remaining features are allowed: - * - Performance Monitoring - * - Scalable Vectors - * - Pointer Authentication - */ - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), PVM_ID_AA64DFR0_ALLOW)) + if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PMU_V3)) set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features); - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW)) - set_bit(KVM_ARM_VCPU_SVE, allowed_features); - - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) && - FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED)) + if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_ADDRESS)) set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features); - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) && - FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA), PVM_ID_AA64ISAR1_ALLOW)) + if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_GENERIC)) set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features); + if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE)) { + set_bit(KVM_ARM_VCPU_SVE, allowed_features); + kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_GUEST_HAS_SVE); + } + bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features, allowed_features, KVM_VCPU_MAX_FEATURES); } -static void pkvm_vcpu_init_ptrauth(struct pkvm_hyp_vcpu *hyp_vcpu) -{ - struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu; - - if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || - vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) { - kvm_vcpu_enable_ptrauth(vcpu); - } else { - vcpu_clear_flag(&hyp_vcpu->vcpu, GUEST_HAS_PTRAUTH); - } -} - static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu) { if (host_vcpu) @@ -408,6 +372,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm, hyp_vm->kvm.created_vcpus = nr_vcpus; hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr; hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled); + hyp_vm->kvm.arch.flags = 0; pkvm_init_features_from_host(hyp_vm, host_kvm); } @@ -415,10 +380,8 @@ static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu * { struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu; - if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) { - vcpu_clear_flag(vcpu, GUEST_HAS_SVE); + if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED); - } } static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, @@ -446,9 +409,14 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags); hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED; + if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) + kvm_init_pvm_id_regs(&hyp_vcpu->vcpu); + + ret = pkvm_vcpu_init_traps(hyp_vcpu); + if (ret) + goto done; + pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu); - pkvm_vcpu_init_ptrauth(hyp_vcpu); - pkvm_vcpu_init_traps(&hyp_vcpu->vcpu); done: if (ret) unpin_host_vcpu(host_vcpu); @@ -693,8 +661,6 @@ unlock: return ret; 
} - hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu); - return 0; } @@ -746,6 +712,14 @@ int __pkvm_teardown_vm(pkvm_handle_t handle) /* Push the metadata pages to the teardown memcache */ for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) { struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx]; + struct kvm_hyp_memcache *vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache; + + while (vcpu_mc->nr_pages) { + void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt); + + push_hyp_memcache(mc, addr, hyp_virt_to_phys); + unmap_donated_memory_noclear(addr, PAGE_SIZE); + } teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu)); } diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index cbdd18cd3f98..d62bcb5634a2 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -12,7 +12,6 @@ #include <nvhe/early_alloc.h> #include <nvhe/ffa.h> -#include <nvhe/fixed_config.h> #include <nvhe/gfp.h> #include <nvhe/memory.h> #include <nvhe/mem_protect.h> @@ -180,7 +179,6 @@ static void hpool_put_page(void *addr) static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit) { - enum kvm_pgtable_prot prot; enum pkvm_page_state state; phys_addr_t phys; @@ -203,16 +201,16 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx, case PKVM_PAGE_OWNED: return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP); case PKVM_PAGE_SHARED_OWNED: - prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED); + hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_BORROWED; break; case PKVM_PAGE_SHARED_BORROWED: - prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED); + hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_OWNED; break; default: return -EINVAL; } - return host_stage2_idmap_locked(phys, PAGE_SIZE, prot); + return 0; } static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx, diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index ed6b58b19cfa..5b6eeab1a774 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -28,7 +28,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc) struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info); struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); - stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE); + stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE); stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack); stacktrace_info->fp = fp; stacktrace_info->pc = pc; @@ -54,7 +54,7 @@ static struct stack_info stackinfo_get_hyp(void) { struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); unsigned long high = params->stack_hyp_va; - unsigned long low = high - PAGE_SIZE; + unsigned long low = high - NVHE_STACK_SIZE; return (struct stack_info) { .low = low, diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 0f6b01b3da5c..6c846d033d24 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -26,7 +26,6 @@ #include <asm/debug-monitors.h> #include <asm/processor.h> -#include <nvhe/fixed_config.h> #include <nvhe/mem_protect.h> /* Non-VHE specific context */ @@ -36,33 +35,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned 
long pc); -static void __activate_traps(struct kvm_vcpu *vcpu) +static void __activate_cptr_traps(struct kvm_vcpu *vcpu) { - u64 val; + u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */ - ___activate_traps(vcpu, vcpu->arch.hcr_el2); - __activate_traps_common(vcpu); + if (has_hvhe()) { + val |= CPACR_EL1_TTA; - val = vcpu->arch.cptr_el2; - val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */ - val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA; - if (cpus_have_final_cap(ARM64_SME)) { - if (has_hvhe()) - val &= ~CPACR_EL1_SMEN; - else - val |= CPTR_EL2_TSM; - } + if (guest_owns_fp_regs()) { + val |= CPACR_EL1_FPEN; + if (vcpu_has_sve(vcpu)) + val |= CPACR_EL1_ZEN; + } + } else { + val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1; - if (!guest_owns_fp_regs()) { - if (has_hvhe()) - val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN); - else - val |= CPTR_EL2_TFP | CPTR_EL2_TZ; + /* + * Always trap SME since it's not supported in KVM. + * TSM is RES1 if SME isn't implemented. + */ + val |= CPTR_EL2_TSM; - __activate_traps_fpsimd32(vcpu); + if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs()) + val |= CPTR_EL2_TZ; + + if (!guest_owns_fp_regs()) + val |= CPTR_EL2_TFP; } + if (!guest_owns_fp_regs()) + __activate_traps_fpsimd32(vcpu); + kvm_write_cptr_el2(val); +} + +static void __activate_traps(struct kvm_vcpu *vcpu) +{ + ___activate_traps(vcpu, vcpu->arch.hcr_el2); + __activate_traps_common(vcpu); + __activate_cptr_traps(vcpu); + write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 2860548d4250..1ddd9ed3cbb3 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -11,7 +11,7 @@ #include <hyp/adjust_pc.h> -#include <nvhe/fixed_config.h> +#include <nvhe/pkvm.h> #include "../../sys_regs.h" @@ -28,222 +28,255 @@ u64 id_aa64mmfr1_el1_sys_val; u64 id_aa64mmfr2_el1_sys_val; u64 id_aa64smfr0_el1_sys_val; -/* - * Inject an unknown/undefined exception to an AArch64 guest while most of its - * sysregs are live. - */ -static void inject_undef64(struct kvm_vcpu *vcpu) -{ - u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); - - *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); - *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR); - - kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); - - __kvm_adjust_pc(vcpu); - - write_sysreg_el1(esr, SYS_ESR); - write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR); - write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR); - write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR); -} - -/* - * Returns the restricted features values of the feature register based on the - * limitations in restrict_fields. - * A feature id field value of 0b0000 does not impose any restrictions. - * Note: Use only for unsigned feature field values. - */ -static u64 get_restricted_features_unsigned(u64 sys_reg_val, - u64 restrict_fields) -{ - u64 value = 0UL; - u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0); +struct pvm_ftr_bits { + bool sign; + u8 shift; + u8 width; + u8 max_val; + bool (*vm_supported)(const struct kvm *kvm); +}; - /* - * According to the Arm Architecture Reference Manual, feature fields - * use increasing values to indicate increases in functionality. - * Iterate over the restricted feature fields and calculate the minimum - * unsigned value between the one supported by the system, and what the - * value is being restricted to. 
- */ - while (sys_reg_val && restrict_fields) { - value |= min(sys_reg_val & mask, restrict_fields & mask); - sys_reg_val &= ~mask; - restrict_fields &= ~mask; - mask <<= ARM64_FEATURE_FIELD_BITS; +#define __MAX_FEAT_FUNC(id, fld, max, func, sgn) \ + { \ + .sign = sgn, \ + .shift = id##_##fld##_SHIFT, \ + .width = id##_##fld##_WIDTH, \ + .max_val = id##_##fld##_##max, \ + .vm_supported = func, \ } - return value; -} - -/* - * Functions that return the value of feature id registers for protected VMs - * based on allowed features, system features, and KVM support. - */ +#define MAX_FEAT_FUNC(id, fld, max, func) \ + __MAX_FEAT_FUNC(id, fld, max, func, id##_##fld##_SIGNED) -static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu) -{ - u64 set_mask = 0; - u64 allow_mask = PVM_ID_AA64PFR0_ALLOW; +#define MAX_FEAT(id, fld, max) \ + MAX_FEAT_FUNC(id, fld, max, NULL) - set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val, - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED); +#define MAX_FEAT_ENUM(id, fld, max) \ + __MAX_FEAT_FUNC(id, fld, max, NULL, false) - return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask; -} +#define FEAT_END { .width = 0, } -static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu) +static bool vm_has_ptrauth(const struct kvm *kvm) { - const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm); - u64 allow_mask = PVM_ID_AA64PFR1_ALLOW; - - if (!kvm_has_mte(kvm)) - allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); - - return id_aa64pfr1_el1_sys_val & allow_mask; -} - -static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu) -{ - /* - * No support for Scalable Vectors, therefore, hyp has no sanitized - * copy of the feature id register. - */ - BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL); - return 0; -} - -static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu) -{ - /* - * No support for debug, including breakpoints, and watchpoints, - * therefore, pKVM has no sanitized copy of the feature id register. - */ - BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL); - return 0; -} - -static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu) -{ - /* - * No support for debug, therefore, hyp has no sanitized copy of the - * feature id register. - */ - BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL); - return 0; -} + if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) + return false; -static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu) -{ - /* - * No support for implementation defined features, therefore, hyp has no - * sanitized copy of the feature id register. - */ - BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL); - return 0; + return (cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || + cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && + kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC); } -static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu) +static bool vm_has_sve(const struct kvm *kvm) { - /* - * No support for implementation defined features, therefore, hyp has no - * sanitized copy of the feature id register. - */ - BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL); - return 0; + return system_supports_sve() && kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_SVE); } -static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu) -{ - return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW; -} +/* + * Definitions for features to be allowed or restricted for protected guests. + * + * Each field in the masks represents the highest supported value for the + * feature. If a feature field is not present, it is not supported. 
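+ * A field whose ->vm_supported() hook fails for this VM reads as + * not-implemented: zero if the field is unsigned, all ones if signed.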
Moreover, + * these are used to generate the guest's view of the feature registers. + * + * The approach for protected VMs is to at least support features that are: + * - Needed by common Linux distributions (e.g., floating point) + * - Trivial to support, e.g., supporting the feature does not introduce or + * require tracking of additional state in KVM + * - Impossible to trap, or impossible to prevent the guest from using anyway + */ -static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu) -{ - u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW; +static const struct pvm_ftr_bits pvmid_aa64pfr0[] = { + MAX_FEAT(ID_AA64PFR0_EL1, EL0, IMP), + MAX_FEAT(ID_AA64PFR0_EL1, EL1, IMP), + MAX_FEAT(ID_AA64PFR0_EL1, EL2, IMP), + MAX_FEAT(ID_AA64PFR0_EL1, EL3, IMP), + MAX_FEAT(ID_AA64PFR0_EL1, FP, FP16), + MAX_FEAT(ID_AA64PFR0_EL1, AdvSIMD, FP16), + MAX_FEAT(ID_AA64PFR0_EL1, GIC, IMP), + MAX_FEAT_FUNC(ID_AA64PFR0_EL1, SVE, IMP, vm_has_sve), + MAX_FEAT(ID_AA64PFR0_EL1, RAS, IMP), + MAX_FEAT(ID_AA64PFR0_EL1, DIT, IMP), + MAX_FEAT(ID_AA64PFR0_EL1, CSV2, IMP), + MAX_FEAT(ID_AA64PFR0_EL1, CSV3, IMP), + FEAT_END +}; - if (!vcpu_has_ptrauth(vcpu)) - allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) | - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) | - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI)); +static const struct pvm_ftr_bits pvmid_aa64pfr1[] = { + MAX_FEAT(ID_AA64PFR1_EL1, BT, IMP), + MAX_FEAT(ID_AA64PFR1_EL1, SSBS, SSBS2), + MAX_FEAT_ENUM(ID_AA64PFR1_EL1, MTE_frac, NI), + FEAT_END +}; - return id_aa64isar1_el1_sys_val & allow_mask; -} +static const struct pvm_ftr_bits pvmid_aa64mmfr0[] = { + MAX_FEAT_ENUM(ID_AA64MMFR0_EL1, PARANGE, 40), + MAX_FEAT_ENUM(ID_AA64MMFR0_EL1, ASIDBITS, 16), + MAX_FEAT(ID_AA64MMFR0_EL1, BIGEND, IMP), + MAX_FEAT(ID_AA64MMFR0_EL1, SNSMEM, IMP), + MAX_FEAT(ID_AA64MMFR0_EL1, BIGENDEL0, IMP), + MAX_FEAT(ID_AA64MMFR0_EL1, EXS, IMP), + FEAT_END +}; -static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu) -{ - u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW; +static const struct pvm_ftr_bits pvmid_aa64mmfr1[] = { + MAX_FEAT(ID_AA64MMFR1_EL1, HAFDBS, DBM), + MAX_FEAT_ENUM(ID_AA64MMFR1_EL1, VMIDBits, 16), + MAX_FEAT(ID_AA64MMFR1_EL1, HPDS, HPDS2), + MAX_FEAT(ID_AA64MMFR1_EL1, PAN, PAN3), + MAX_FEAT(ID_AA64MMFR1_EL1, SpecSEI, IMP), + MAX_FEAT(ID_AA64MMFR1_EL1, ETS, IMP), + MAX_FEAT(ID_AA64MMFR1_EL1, CMOW, IMP), + FEAT_END +}; - if (!vcpu_has_ptrauth(vcpu)) - allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | - ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); +static const struct pvm_ftr_bits pvmid_aa64mmfr2[] = { + MAX_FEAT(ID_AA64MMFR2_EL1, CnP, IMP), + MAX_FEAT(ID_AA64MMFR2_EL1, UAO, IMP), + MAX_FEAT(ID_AA64MMFR2_EL1, IESB, IMP), + MAX_FEAT(ID_AA64MMFR2_EL1, AT, IMP), + MAX_FEAT_ENUM(ID_AA64MMFR2_EL1, IDS, 0x18), + MAX_FEAT(ID_AA64MMFR2_EL1, TTL, IMP), + MAX_FEAT(ID_AA64MMFR2_EL1, BBM, 2), + MAX_FEAT(ID_AA64MMFR2_EL1, E0PD, IMP), + FEAT_END +}; - return id_aa64isar2_el1_sys_val & allow_mask; -} +static const struct pvm_ftr_bits pvmid_aa64isar1[] = { + MAX_FEAT(ID_AA64ISAR1_EL1, DPB, DPB2), + MAX_FEAT_FUNC(ID_AA64ISAR1_EL1, APA, PAuth, vm_has_ptrauth), + MAX_FEAT_FUNC(ID_AA64ISAR1_EL1, API, PAuth, vm_has_ptrauth), + MAX_FEAT(ID_AA64ISAR1_EL1, JSCVT, IMP), + MAX_FEAT(ID_AA64ISAR1_EL1, FCMA, IMP), + MAX_FEAT(ID_AA64ISAR1_EL1, LRCPC, LRCPC3), + MAX_FEAT(ID_AA64ISAR1_EL1, GPA, IMP), + MAX_FEAT(ID_AA64ISAR1_EL1, GPI, IMP), + MAX_FEAT(ID_AA64ISAR1_EL1, FRINTTS, IMP), + MAX_FEAT(ID_AA64ISAR1_EL1, SB, IMP), + MAX_FEAT(ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX), +
MAX_FEAT(ID_AA64ISAR1_EL1, BF16, EBF16), + MAX_FEAT(ID_AA64ISAR1_EL1, DGH, IMP), + MAX_FEAT(ID_AA64ISAR1_EL1, I8MM, IMP), + FEAT_END +}; -static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu) -{ - u64 set_mask; +static const struct pvm_ftr_bits pvmid_aa64isar2[] = { + MAX_FEAT_FUNC(ID_AA64ISAR2_EL1, GPA3, IMP, vm_has_ptrauth), + MAX_FEAT_FUNC(ID_AA64ISAR2_EL1, APA3, PAuth, vm_has_ptrauth), + MAX_FEAT(ID_AA64ISAR2_EL1, ATS1A, IMP), + FEAT_END +}; - set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val, - PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED); +/* + * None of the features in ID_AA64DFR0_EL1 or ID_AA64MMFR4_EL1 are supported. + * However, both have Not-Implemented values that are non-zero. Define them + * so they can be used when getting the value of these registers. + */ +#define ID_AA64DFR0_EL1_NONZERO_NI \ +( \ + SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI) | \ + SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, MTPMU, NI) \ +) - return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask; -} +#define ID_AA64MMFR4_EL1_NONZERO_NI \ + SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI) -static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu) +/* + * Returns the value of the feature registers based on the system register + * value, the vcpu support for the relevant features, and the additional + * restrictions for protected VMs. + */ +static u64 get_restricted_features(const struct kvm_vcpu *vcpu, + u64 sys_reg_val, + const struct pvm_ftr_bits restrictions[]) { - return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW; -} + u64 val = 0UL; + int i; + + for (i = 0; restrictions[i].width != 0; i++) { + bool (*vm_supported)(const struct kvm *) = restrictions[i].vm_supported; + bool sign = restrictions[i].sign; + int shift = restrictions[i].shift; + int width = restrictions[i].width; + u64 min_signed = (1UL << width) - 1UL; + u64 sign_bit = 1UL << (width - 1); + u64 mask = GENMASK_ULL(width + shift - 1, shift); + u64 sys_val = (sys_reg_val & mask) >> shift; + u64 pvm_max = restrictions[i].max_val; + + if (vm_supported && !vm_supported(vcpu->kvm)) + val |= (sign ?
min_signed : 0) << shift; + else if (sign && (sys_val >= sign_bit || pvm_max >= sign_bit)) + val |= max(sys_val, pvm_max) << shift; + else + val |= min(sys_val, pvm_max) << shift; + } -static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu) -{ - return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW; + return val; } -/* Read a sanitized cpufeature ID register by its encoding */ -u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id) +static u64 pvm_calc_id_reg(const struct kvm_vcpu *vcpu, u32 id) { switch (id) { case SYS_ID_AA64PFR0_EL1: - return get_pvm_id_aa64pfr0(vcpu); + return get_restricted_features(vcpu, id_aa64pfr0_el1_sys_val, pvmid_aa64pfr0); case SYS_ID_AA64PFR1_EL1: - return get_pvm_id_aa64pfr1(vcpu); - case SYS_ID_AA64ZFR0_EL1: - return get_pvm_id_aa64zfr0(vcpu); - case SYS_ID_AA64DFR0_EL1: - return get_pvm_id_aa64dfr0(vcpu); - case SYS_ID_AA64DFR1_EL1: - return get_pvm_id_aa64dfr1(vcpu); - case SYS_ID_AA64AFR0_EL1: - return get_pvm_id_aa64afr0(vcpu); - case SYS_ID_AA64AFR1_EL1: - return get_pvm_id_aa64afr1(vcpu); + return get_restricted_features(vcpu, id_aa64pfr1_el1_sys_val, pvmid_aa64pfr1); case SYS_ID_AA64ISAR0_EL1: - return get_pvm_id_aa64isar0(vcpu); + return id_aa64isar0_el1_sys_val; case SYS_ID_AA64ISAR1_EL1: - return get_pvm_id_aa64isar1(vcpu); + return get_restricted_features(vcpu, id_aa64isar1_el1_sys_val, pvmid_aa64isar1); case SYS_ID_AA64ISAR2_EL1: - return get_pvm_id_aa64isar2(vcpu); + return get_restricted_features(vcpu, id_aa64isar2_el1_sys_val, pvmid_aa64isar2); case SYS_ID_AA64MMFR0_EL1: - return get_pvm_id_aa64mmfr0(vcpu); + return get_restricted_features(vcpu, id_aa64mmfr0_el1_sys_val, pvmid_aa64mmfr0); case SYS_ID_AA64MMFR1_EL1: - return get_pvm_id_aa64mmfr1(vcpu); + return get_restricted_features(vcpu, id_aa64mmfr1_el1_sys_val, pvmid_aa64mmfr1); case SYS_ID_AA64MMFR2_EL1: - return get_pvm_id_aa64mmfr2(vcpu); + return get_restricted_features(vcpu, id_aa64mmfr2_el1_sys_val, pvmid_aa64mmfr2); + case SYS_ID_AA64DFR0_EL1: + return ID_AA64DFR0_EL1_NONZERO_NI; + case SYS_ID_AA64MMFR4_EL1: + return ID_AA64MMFR4_EL1_NONZERO_NI; default: /* Unhandled ID register, RAZ */ return 0; } } +/* + * Inject an unknown/undefined exception to an AArch64 guest while most of its + * sysregs are live. + */ +static void inject_undef64(struct kvm_vcpu *vcpu) +{ + u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); + + *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); + *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR); + + kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); + + __kvm_adjust_pc(vcpu); + + write_sysreg_el1(esr, SYS_ESR); + write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR); + write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR); + write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR); +} + static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r) { - return pvm_read_id_reg(vcpu, reg_to_encoding(r)); + struct kvm *kvm = vcpu->kvm; + u32 reg = reg_to_encoding(r); + + if (WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))) + return 0; + + if (reg >= sys_reg(3, 0, 0, 1, 0) && reg <= sys_reg(3, 0, 0, 7, 7)) + return kvm->arch.id_regs[IDREG_IDX(reg)]; + + return 0; } /* Handler to RAZ/WI sysregs */ @@ -271,13 +304,6 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu, return false; } - /* - * No support for AArch32 guests, therefore, pKVM has no sanitized copy - * of AArch32 feature id registers. 
- */ - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_EL1_IMP); - return pvm_access_raz_wi(vcpu, p, r); } @@ -449,6 +475,30 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = { }; /* + * Initializes feature registers for protected VMs. + */ +void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_arch *ka = &kvm->arch; + u32 r; + + hyp_assert_lock_held(&vm_table_lock); + + if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)) + return; + + /* + * Initialize only AArch64 id registers since AArch32 isn't supported + * for protected VMs. + */ + for (r = sys_reg(3, 0, 0, 4, 0); r <= sys_reg(3, 0, 0, 7, 7); r += sys_reg(0, 0, 0, 0, 1)) + ka->id_regs[IDREG_IDX(r)] = pvm_calc_id_reg(vcpu, r); + + set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); +} + +/* * Checks that the sysreg table is unique and in-order. * * Returns 0 if the table is consistent, or 1 otherwise. diff --git a/arch/arm64/kvm/hyp/nvhe/timer-sr.c b/arch/arm64/kvm/hyp/nvhe/timer-sr.c index 3aaab20ae5b4..ff176f4ce7de 100644 --- a/arch/arm64/kvm/hyp/nvhe/timer-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/timer-sr.c @@ -22,15 +22,16 @@ void __kvm_timer_set_cntvoff(u64 cntvoff) */ void __timer_disable_traps(struct kvm_vcpu *vcpu) { - u64 val, shift = 0; + u64 set, clr, shift = 0; if (has_hvhe()) shift = 10; /* Allow physical timer/counter access for the host */ - val = read_sysreg(cnthctl_el2); - val |= (CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) << shift; - write_sysreg(val, cnthctl_el2); + set = (CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) << shift; + clr = CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT; + + sysreg_clear_set(cnthctl_el2, clr, set); } /* @@ -58,5 +59,12 @@ void __timer_enable_traps(struct kvm_vcpu *vcpu) set <<= 10; } + /* + * Trap the virtual counter/timer if we have a broken cntvoff + * implementation.
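+ * The traps are dropped again by __timer_disable_traps() on the way + * back out, so the host always has direct counter/timer access.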
+ */ + if (has_broken_cntvoff()) + set |= CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT; + sysreg_clear_set(cnthctl_el2, clr, set); } diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index d2b6fa051d6b..df5cc74a7dd0 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -1232,14 +1232,13 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) NULL, NULL, 0); } -void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr) +void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr, + enum kvm_pgtable_walk_flags flags) { int ret; ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0, - NULL, NULL, - KVM_PGTABLE_WALK_HANDLE_FAULT | - KVM_PGTABLE_WALK_SHARED); + NULL, NULL, flags); if (!ret) dsb(ishst); } @@ -1295,7 +1294,7 @@ bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, } int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, - enum kvm_pgtable_prot prot) + enum kvm_pgtable_prot prot, enum kvm_pgtable_walk_flags flags) { int ret; s8 level; @@ -1313,9 +1312,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, if (prot & KVM_PGTABLE_PROT_X) clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN; - ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level, - KVM_PGTABLE_WALK_HANDLE_FAULT | - KVM_PGTABLE_WALK_SHARED); + ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level, flags); if (!ret || ret == -EAGAIN) kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level); return ret; diff --git a/arch/arm64/kvm/hyp/vhe/debug-sr.c b/arch/arm64/kvm/hyp/vhe/debug-sr.c index 289689b2682d..0100339b09e0 100644 --- a/arch/arm64/kvm/hyp/vhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/vhe/debug-sr.c @@ -19,8 +19,3 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu) { __debug_switch_to_host_common(vcpu); } - -u64 __kvm_get_mdcr_el2(void) -{ - return read_sysreg(mdcr_el2); -} diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 59d992455793..b5b9dbaf1fdd 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -256,6 +256,110 @@ void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu) host_data_ptr(host_ctxt)->__hyp_running_vcpu = NULL; } +static u64 compute_emulated_cntx_ctl_el0(struct kvm_vcpu *vcpu, + enum vcpu_sysreg reg) +{ + unsigned long ctl; + u64 cval, cnt; + bool stat; + + switch (reg) { + case CNTP_CTL_EL0: + cval = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0); + ctl = __vcpu_sys_reg(vcpu, CNTP_CTL_EL0); + cnt = compute_counter_value(vcpu_ptimer(vcpu)); + break; + case CNTV_CTL_EL0: + cval = __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0); + ctl = __vcpu_sys_reg(vcpu, CNTV_CTL_EL0); + cnt = compute_counter_value(vcpu_vtimer(vcpu)); + break; + default: + BUG(); + } + + stat = cval <= cnt; + __assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &ctl, stat); + + return ctl; +} + +static bool kvm_hyp_handle_timer(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + u64 esr, val; + + /* + * Having FEAT_ECV allows for a better quality of timer emulation. + * However, this comes at a huge cost in terms of traps. Try and + * satisfy the reads from guest's hypervisor context without + * returning to the kernel if we can. 
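+ * Only reads are handled here; writes, and any register not + * recognised below, still take the full exit to the kernel.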
+ */ + if (!is_hyp_ctxt(vcpu)) + return false; + + esr = kvm_vcpu_get_esr(vcpu); + if ((esr & ESR_ELx_SYS64_ISS_DIR_MASK) != ESR_ELx_SYS64_ISS_DIR_READ) + return false; + + switch (esr_sys64_to_sysreg(esr)) { + case SYS_CNTP_CTL_EL02: + val = compute_emulated_cntx_ctl_el0(vcpu, CNTP_CTL_EL0); + break; + case SYS_CNTP_CTL_EL0: + if (vcpu_el2_e2h_is_set(vcpu)) + val = read_sysreg_el0(SYS_CNTP_CTL); + else + val = compute_emulated_cntx_ctl_el0(vcpu, CNTP_CTL_EL0); + break; + case SYS_CNTP_CVAL_EL02: + val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0); + break; + case SYS_CNTP_CVAL_EL0: + if (vcpu_el2_e2h_is_set(vcpu)) { + val = read_sysreg_el0(SYS_CNTP_CVAL); + + if (!has_cntpoff()) + val -= timer_get_offset(vcpu_hptimer(vcpu)); + } else { + val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0); + } + break; + case SYS_CNTPCT_EL0: + case SYS_CNTPCTSS_EL0: + val = compute_counter_value(vcpu_hptimer(vcpu)); + break; + case SYS_CNTV_CTL_EL02: + val = compute_emulated_cntx_ctl_el0(vcpu, CNTV_CTL_EL0); + break; + case SYS_CNTV_CTL_EL0: + if (vcpu_el2_e2h_is_set(vcpu)) + val = read_sysreg_el0(SYS_CNTV_CTL); + else + val = compute_emulated_cntx_ctl_el0(vcpu, CNTV_CTL_EL0); + break; + case SYS_CNTV_CVAL_EL02: + val = __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0); + break; + case SYS_CNTV_CVAL_EL0: + if (vcpu_el2_e2h_is_set(vcpu)) + val = read_sysreg_el0(SYS_CNTV_CVAL); + else + val = __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0); + break; + case SYS_CNTVCT_EL0: + case SYS_CNTVCTSS_EL0: + val = compute_counter_value(vcpu_hvtimer(vcpu)); + break; + default: + return false; + } + + vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val); + __kvm_skip_instr(vcpu); + + return true; +} + static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code) { u64 esr = kvm_vcpu_get_esr(vcpu); @@ -409,6 +513,9 @@ static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code) if (kvm_hyp_handle_tlbi_el2(vcpu, exit_code)) return true; + if (kvm_hyp_handle_timer(vcpu, exit_code)) + return true; + if (kvm_hyp_handle_cpacr_el1(vcpu, exit_code)) return true; diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c index 5f78a39053a7..90b018e06f2c 100644 --- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c @@ -216,7 +216,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu) __sysreg32_restore_state(vcpu); __sysreg_restore_user_state(guest_ctxt); - if (unlikely(__is_hyp_ctxt(guest_ctxt))) { + if (unlikely(is_hyp_ctxt(vcpu))) { __sysreg_restore_vel2_state(vcpu); } else { if (vcpu_has_nv(vcpu)) { @@ -260,7 +260,7 @@ void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu) host_ctxt = host_data_ptr(host_ctxt); - if (unlikely(__is_hyp_ctxt(guest_ctxt))) + if (unlikely(is_hyp_ctxt(vcpu))) __sysreg_save_vel2_state(vcpu); else __sysreg_save_el1_state(guest_ctxt); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index c9d46ad57e52..1f55b0c7b11d 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -15,6 +15,7 @@ #include <asm/kvm_arm.h> #include <asm/kvm_mmu.h> #include <asm/kvm_pgtable.h> +#include <asm/kvm_pkvm.h> #include <asm/kvm_ras.h> #include <asm/kvm_asm.h> #include <asm/kvm_emulate.h> @@ -29,8 +30,12 @@ static unsigned long __ro_after_init hyp_idmap_start; static unsigned long __ro_after_init hyp_idmap_end; static phys_addr_t __ro_after_init hyp_idmap_vector; +u32 __ro_after_init __hyp_va_bits; + static unsigned long __ro_after_init io_map_base; +#define KVM_PGT_FN(fn) (!is_protected_kvm_enabled() ? 
fn : p ## fn) + static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end, phys_addr_t size) { @@ -147,7 +152,7 @@ static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr, return -EINVAL; next = __stage2_range_addr_end(addr, end, chunk_size); - ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache); + ret = KVM_PGT_FN(kvm_pgtable_stage2_split)(pgt, addr, next - addr, cache); if (ret) break; } while (addr = next, addr != end); @@ -168,15 +173,23 @@ static bool memslot_is_logging(struct kvm_memory_slot *memslot) */ int kvm_arch_flush_remote_tlbs(struct kvm *kvm) { - kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu); + if (is_protected_kvm_enabled()) + kvm_call_hyp_nvhe(__pkvm_tlb_flush_vmid, kvm->arch.pkvm.handle); + else + kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu); return 0; } int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) { - kvm_tlb_flush_vmid_range(&kvm->arch.mmu, - gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); + u64 size = nr_pages << PAGE_SHIFT; + u64 addr = gfn << PAGE_SHIFT; + + if (is_protected_kvm_enabled()) + kvm_call_hyp_nvhe(__pkvm_tlb_flush_vmid, kvm->arch.pkvm.handle); + else + kvm_tlb_flush_vmid_range(&kvm->arch.mmu, addr, size); return 0; } @@ -225,7 +238,7 @@ static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head) void *pgtable = page_to_virt(page); s8 level = page_private(page); - kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level); + KVM_PGT_FN(kvm_pgtable_stage2_free_unlinked)(&kvm_s2_mm_ops, pgtable, level); } static void stage2_free_unlinked_table(void *addr, s8 level) @@ -324,7 +337,7 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 lockdep_assert_held_write(&kvm->mmu_lock); WARN_ON(size & ~PAGE_MASK); - WARN_ON(stage2_apply_range(mmu, start, end, kvm_pgtable_stage2_unmap, + WARN_ON(stage2_apply_range(mmu, start, end, KVM_PGT_FN(kvm_pgtable_stage2_unmap), may_block)); } @@ -336,7 +349,7 @@ void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) { - stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_flush); + stage2_apply_range_resched(mmu, addr, end, KVM_PGT_FN(kvm_pgtable_stage2_flush)); } static void stage2_flush_memslot(struct kvm *kvm, @@ -704,10 +717,10 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr) mutex_lock(&kvm_hyp_pgd_mutex); /* - * Efficient stack verification using the PAGE_SHIFT bit implies + * Efficient stack verification using the NVHE_STACK_SHIFT bit implies * an alignment of our allocation on the order of the size. */ - size = PAGE_SIZE * 2; + size = NVHE_STACK_SIZE * 2; base = ALIGN_DOWN(io_map_base - size, size); ret = __hyp_alloc_private_va_range(base); @@ -724,12 +737,12 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr) * at the higher address and leave the lower guard page * unbacked. * - * Any valid stack address now has the PAGE_SHIFT bit as 1 + * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1 * and addresses corresponding to the guard page have the - * PAGE_SHIFT bit as 0 - this is used for overflow detection. + * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection. 
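+ * Equivalently, (sp & BIT(NVHE_STACK_SHIFT)) != 0 holds exactly for + * addresses on the stack itself, so the check is a single bit test.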
*/ - ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr, - PAGE_HYP); + ret = __create_hyp_mappings(base + NVHE_STACK_SIZE, NVHE_STACK_SIZE, + phys_addr, PAGE_HYP); if (ret) kvm_err("Cannot map hyp stack\n"); @@ -942,10 +955,14 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t return -ENOMEM; mmu->arch = &kvm->arch; - err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops); + err = KVM_PGT_FN(kvm_pgtable_stage2_init)(pgt, mmu, &kvm_s2_mm_ops); if (err) goto out_free_pgtable; + mmu->pgt = pgt; + if (is_protected_kvm_enabled()) + return 0; + mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran)); if (!mmu->last_vcpu_ran) { err = -ENOMEM; @@ -959,7 +976,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT; mmu->split_page_cache.gfp_zero = __GFP_ZERO; - mmu->pgt = pgt; mmu->pgd_phys = __pa(pgt->pgd); if (kvm_is_nested_s2_mmu(kvm, mmu)) @@ -968,7 +984,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t return 0; out_destroy_pgtable: - kvm_pgtable_stage2_destroy(pgt); + KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt); out_free_pgtable: kfree(pgt); return err; @@ -1065,7 +1081,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) write_unlock(&kvm->mmu_lock); if (pgt) { - kvm_pgtable_stage2_destroy(pgt); + KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt); kfree(pgt); } } @@ -1082,9 +1098,11 @@ static void *hyp_mc_alloc_fn(void *unused) void free_hyp_memcache(struct kvm_hyp_memcache *mc) { - if (is_protected_kvm_enabled()) - __free_hyp_memcache(mc, hyp_mc_free_fn, - kvm_host_va, NULL); + if (!is_protected_kvm_enabled()) + return; + + kfree(mc->mapping); + __free_hyp_memcache(mc, hyp_mc_free_fn, kvm_host_va, NULL); } int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages) @@ -1092,6 +1110,12 @@ int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages) if (!is_protected_kvm_enabled()) return 0; + if (!mc->mapping) { + mc->mapping = kzalloc(sizeof(struct pkvm_mapping), GFP_KERNEL_ACCOUNT); + if (!mc->mapping) + return -ENOMEM; + } + return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn, kvm_host_pa, NULL); } @@ -1130,8 +1154,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, break; write_lock(&kvm->mmu_lock); - ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot, - &cache, 0); + ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, addr, PAGE_SIZE, + pa, prot, &cache, 0); write_unlock(&kvm->mmu_lock); if (ret) break; @@ -1151,7 +1175,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, */ void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) { - stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_wrprotect); + stage2_apply_range_resched(mmu, addr, end, KVM_PGT_FN(kvm_pgtable_stage2_wrprotect)); } /** @@ -1442,9 +1466,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, unsigned long mmu_seq; phys_addr_t ipa = fault_ipa; struct kvm *kvm = vcpu->kvm; - struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; struct vm_area_struct *vma; short vma_shift; + void *memcache; gfn_t gfn; kvm_pfn_t pfn; bool logging_active = memslot_is_logging(memslot); @@ -1452,6 +1476,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; struct kvm_pgtable *pgt; struct page *page; + enum kvm_pgtable_walk_flags flags = 
KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED; if (fault_is_perm) fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu); @@ -1471,8 +1496,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * and a write fault needs to collapse a block entry into a table. */ if (!fault_is_perm || (logging_active && write_fault)) { - ret = kvm_mmu_topup_memory_cache(memcache, - kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu)); + int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu); + + if (!is_protected_kvm_enabled()) { + memcache = &vcpu->arch.mmu_page_cache; + ret = kvm_mmu_topup_memory_cache(memcache, min_pages); + } else { + memcache = &vcpu->arch.pkvm_memcache; + ret = topup_hyp_memcache(memcache, min_pages); + } if (ret) return ret; } @@ -1493,7 +1525,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * logging_active is guaranteed to never be true for VM_PFNMAP * memslots. */ - if (logging_active) { + if (logging_active || is_protected_kvm_enabled()) { force_pte = true; vma_shift = PAGE_SHIFT; } else { @@ -1633,7 +1665,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, prot |= kvm_encode_nested_level(nested); } - read_lock(&kvm->mmu_lock); + kvm_fault_lock(kvm); pgt = vcpu->arch.hw_mmu->pgt; if (mmu_invalidate_retry(kvm, mmu_seq)) { ret = -EAGAIN; @@ -1695,18 +1727,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * PTE, which will be preserved. */ prot &= ~KVM_NV_GUEST_MAP_SZ; - ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot); + ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, fault_ipa, prot, flags); } else { - ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize, + ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault_ipa, vma_pagesize, __pfn_to_phys(pfn), prot, - memcache, - KVM_PGTABLE_WALK_HANDLE_FAULT | - KVM_PGTABLE_WALK_SHARED); + memcache, flags); } out_unlock: kvm_release_faultin_page(kvm, page, !!ret, writable); - read_unlock(&kvm->mmu_lock); + kvm_fault_unlock(kvm); /* Mark the page dirty only if the fault is handled successfully */ if (writable && !ret) @@ -1718,13 +1748,14 @@ out_unlock: /* Resolve the access fault by making the page young again. */ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) { + enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED; struct kvm_s2_mmu *mmu; trace_kvm_access_fault(fault_ipa); read_lock(&vcpu->kvm->mmu_lock); mmu = vcpu->arch.hw_mmu; - kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa); + KVM_PGT_FN(kvm_pgtable_stage2_mkyoung)(mmu->pgt, fault_ipa, flags); read_unlock(&vcpu->kvm->mmu_lock); } @@ -1764,7 +1795,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) } /* Falls between the IPA range and the PARange? 
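 * Such an IPA is outside the guest's configured IPA space, so it can * never be mapped; the abort is reflected straight back into the guest.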
*/ - if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) { + if (fault_ipa >= BIT_ULL(VTCR_EL2_IPA(vcpu->arch.hw_mmu->vtcr))) { fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0); if (is_iabt) @@ -1930,7 +1961,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) if (!kvm->arch.mmu.pgt) return false; - return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt, + return KVM_PGT_FN(kvm_pgtable_stage2_test_clear_young)(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, size, true); /* @@ -1946,7 +1977,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) if (!kvm->arch.mmu.pgt) return false; - return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt, + return KVM_PGT_FN(kvm_pgtable_stage2_test_clear_young)(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, size, false); } @@ -2056,6 +2087,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits) goto out_destroy_pgtable; io_map_base = hyp_idmap_start; + __hyp_va_bits = *hyp_va_bits; return 0; out_destroy_pgtable: diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 9b36218b48de..33d2ace68665 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -830,8 +830,10 @@ static void limit_nv_id_regs(struct kvm *kvm) NV_FTR(PFR0, RAS) | NV_FTR(PFR0, EL3) | NV_FTR(PFR0, EL2) | - NV_FTR(PFR0, EL1)); - /* 64bit EL1/EL2/EL3 only */ + NV_FTR(PFR0, EL1) | + NV_FTR(PFR0, EL0)); + /* 64bit only at any EL */ + val |= FIELD_PREP(NV_FTR(PFR0, EL0), 0b0001); val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001); val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001); val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001); @@ -963,14 +965,15 @@ static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, kvm->arch.sysreg_masks->mask[i].res1 = res1; } -int kvm_init_nv_sysregs(struct kvm *kvm) +int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu) { + struct kvm *kvm = vcpu->kvm; u64 res0, res1; lockdep_assert_held(&kvm->arch.config_lock); if (kvm->arch.sysreg_masks) - return 0; + goto out; kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)), GFP_KERNEL_ACCOUNT); @@ -1021,8 +1024,8 @@ int kvm_init_nv_sysregs(struct kvm *kvm) res0 |= HCR_NV2; if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP)) res0 |= (HCR_AT | HCR_NV1 | HCR_NV); - if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && - __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC))) + if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && + kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC))) res0 |= (HCR_API | HCR_APK); if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP)) res0 |= BIT(39); @@ -1078,8 +1081,8 @@ int kvm_init_nv_sysregs(struct kvm *kvm) /* HFG[RW]TR_EL2 */ res0 = res1 = 0; - if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && - __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC))) + if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && + kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC))) res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey | HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey | HFGxTR_EL2_APIBKey); @@ -1271,6 +1274,25 @@ int kvm_init_nv_sysregs(struct kvm *kvm) res0 |= MDCR_EL2_EnSTEPOP; set_sysreg_masks(kvm, MDCR_EL2, res0, res1); + /* CNTHCTL_EL2 */ + res0 = GENMASK(63, 20); + res1 = 0; + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RME, IMP)) + res0 |= CNTHCTL_CNTPMASK | CNTHCTL_CNTVMASK; + if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, CNTPOFF)) { + res0 |= CNTHCTL_ECV; + if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, IMP)) + res0 |= (CNTHCTL_EL1TVT | 
CNTHCTL_EL1TVCT | + CNTHCTL_EL1NVPCT | CNTHCTL_EL1NVVCT); + } + if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP)) + res0 |= GENMASK(11, 8); + set_sysreg_masks(kvm, CNTHCTL_EL2, res0, res1); + +out: + for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++) + (void)__vcpu_sys_reg(vcpu, sr); + return 0; } diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c index 85117ea8f351..930b677eb9b0 100644 --- a/arch/arm64/kvm/pkvm.c +++ b/arch/arm64/kvm/pkvm.c @@ -7,6 +7,7 @@ #include <linux/init.h> #include <linux/kmemleak.h> #include <linux/kvm_host.h> +#include <asm/kvm_mmu.h> #include <linux/memblock.h> #include <linux/mutex.h> #include <linux/sort.h> @@ -268,3 +269,203 @@ static int __init finalize_pkvm(void) return ret; } device_initcall_sync(finalize_pkvm); + +static int cmp_mappings(struct rb_node *node, const struct rb_node *parent) +{ + struct pkvm_mapping *a = rb_entry(node, struct pkvm_mapping, node); + struct pkvm_mapping *b = rb_entry(parent, struct pkvm_mapping, node); + + if (a->gfn < b->gfn) + return -1; + if (a->gfn > b->gfn) + return 1; + return 0; +} + +static struct rb_node *find_first_mapping_node(struct rb_root *root, u64 gfn) +{ + struct rb_node *node = root->rb_node, *prev = NULL; + struct pkvm_mapping *mapping; + + while (node) { + mapping = rb_entry(node, struct pkvm_mapping, node); + if (mapping->gfn == gfn) + return node; + prev = node; + node = (gfn < mapping->gfn) ? node->rb_left : node->rb_right; + } + + return prev; +} + +/* + * __tmp is updated to rb_next(__tmp) *before* entering the body of the loop to allow freeing + * of __map inline. + */ +#define for_each_mapping_in_range_safe(__pgt, __start, __end, __map) \ + for (struct rb_node *__tmp = find_first_mapping_node(&(__pgt)->pkvm_mappings, \ + ((__start) >> PAGE_SHIFT)); \ + __tmp && ({ \ + __map = rb_entry(__tmp, struct pkvm_mapping, node); \ + __tmp = rb_next(__tmp); \ + true; \ + }); \ + ) \ + if (__map->gfn < ((__start) >> PAGE_SHIFT)) \ + continue; \ + else if (__map->gfn >= ((__end) >> PAGE_SHIFT)) \ + break; \ + else + +int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, + struct kvm_pgtable_mm_ops *mm_ops) +{ + pgt->pkvm_mappings = RB_ROOT; + pgt->mmu = mmu; + + return 0; +} + +void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) +{ + struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); + pkvm_handle_t handle = kvm->arch.pkvm.handle; + struct pkvm_mapping *mapping; + struct rb_node *node; + + if (!handle) + return; + + node = rb_first(&pgt->pkvm_mappings); + while (node) { + mapping = rb_entry(node, struct pkvm_mapping, node); + kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn); + node = rb_next(node); + rb_erase(&mapping->node, &pgt->pkvm_mappings); + kfree(mapping); + } +} + +int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, + u64 phys, enum kvm_pgtable_prot prot, + void *mc, enum kvm_pgtable_walk_flags flags) +{ + struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); + struct pkvm_mapping *mapping = NULL; + struct kvm_hyp_memcache *cache = mc; + u64 gfn = addr >> PAGE_SHIFT; + u64 pfn = phys >> PAGE_SHIFT; + int ret; + + if (size != PAGE_SIZE) + return -EINVAL; + + lockdep_assert_held_write(&kvm->mmu_lock); + ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, prot); + if (ret) { + /* Is the gfn already mapped due to a racing vCPU? 
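+ * hyp reports that case as -EPERM, which is translated to -EAGAIN so + * the fault is replayed rather than treated as a fatal error.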
*/ + if (ret == -EPERM) + return -EAGAIN; + } + + swap(mapping, cache->mapping); + mapping->gfn = gfn; + mapping->pfn = pfn; + WARN_ON(rb_find_add(&mapping->node, &pgt->pkvm_mappings, cmp_mappings)); + + return ret; +} + +int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) +{ + struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); + pkvm_handle_t handle = kvm->arch.pkvm.handle; + struct pkvm_mapping *mapping; + int ret = 0; + + lockdep_assert_held_write(&kvm->mmu_lock); + for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) { + ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn); + if (WARN_ON(ret)) + break; + rb_erase(&mapping->node, &pgt->pkvm_mappings); + kfree(mapping); + } + + return ret; +} + +int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) +{ + struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); + pkvm_handle_t handle = kvm->arch.pkvm.handle; + struct pkvm_mapping *mapping; + int ret = 0; + + lockdep_assert_held(&kvm->mmu_lock); + for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) { + ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn); + if (WARN_ON(ret)) + break; + } + + return ret; +} + +int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) +{ + struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); + struct pkvm_mapping *mapping; + + lockdep_assert_held(&kvm->mmu_lock); + for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) + __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn), PAGE_SIZE); + + return 0; +} + +bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold) +{ + struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); + pkvm_handle_t handle = kvm->arch.pkvm.handle; + struct pkvm_mapping *mapping; + bool young = false; + + lockdep_assert_held(&kvm->mmu_lock); + for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) + young |= kvm_call_hyp_nvhe(__pkvm_host_test_clear_young_guest, handle, mapping->gfn, + mkold); + + return young; +} + +int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot, + enum kvm_pgtable_walk_flags flags) +{ + return kvm_call_hyp_nvhe(__pkvm_host_relax_perms_guest, addr >> PAGE_SHIFT, prot); +} + +void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr, + enum kvm_pgtable_walk_flags flags) +{ + WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_mkyoung_guest, addr >> PAGE_SHIFT)); +} + +void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level) +{ + WARN_ON_ONCE(1); +} + +kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level, + enum kvm_pgtable_prot prot, void *mc, bool force_pte) +{ + WARN_ON_ONCE(1); + return NULL; +} + +int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, + struct kvm_mmu_memory_cache *mc) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 470524b31951..803e11b0dc8f 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -85,7 +85,7 @@ static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until * kvm_arm_vcpu_finalize(), which freezes the configuration. 
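 * The flag lives in kvm->arch.flags: having SVE is a VM-wide property, * not a per-vCPU one.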
*/ - vcpu_set_flag(vcpu, GUEST_HAS_SVE); + set_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &vcpu->kvm->arch.flags); } /* @@ -211,10 +211,6 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu) kvm_vcpu_reset_sve(vcpu); } - if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || - vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) - kvm_vcpu_enable_ptrauth(vcpu); - if (vcpu_el1_is_32bit(vcpu)) pstate = VCPU_RESET_PSTATE_SVC; else if (vcpu_has_nv(vcpu)) diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index 3ace5b75813b..af5eec681127 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -19,6 +19,7 @@ #include <linux/kvm.h> #include <linux/kvm_host.h> +#include <asm/kvm_mmu.h> #include <asm/stacktrace/nvhe.h> static struct stack_info stackinfo_get_overflow(void) @@ -50,7 +51,7 @@ static struct stack_info stackinfo_get_hyp(void) struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); unsigned long low = (unsigned long)stacktrace_info->stack_base; - unsigned long high = low + PAGE_SIZE; + unsigned long high = low + NVHE_STACK_SIZE; return (struct stack_info) { .low = low, @@ -60,8 +61,8 @@ static struct stack_info stackinfo_get_hyp(void) static struct stack_info stackinfo_get_hyp_kern_va(void) { - unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page); - unsigned long high = low + PAGE_SIZE; + unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base); + unsigned long high = low + NVHE_STACK_SIZE; return (struct stack_info) { .low = low, @@ -145,7 +146,7 @@ static void unwind(struct unwind_state *state, */ static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where) { - unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0); + unsigned long va_mask = GENMASK_ULL(__hyp_va_bits - 1, 0); unsigned long hyp_offset = (unsigned long)arg; /* Mask tags and convert to kern addr */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e4749ecbcd79..f6cd1ea7fb55 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -570,17 +570,10 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - u64 oslsr; - if (!p->is_write) return read_from_write_only(vcpu, p, r); - /* Forward the OSLK bit to OSLSR */ - oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK; - if (p->regval & OSLAR_EL1_OSLK) - oslsr |= OSLSR_EL1_OSLK; - - __vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr; + kvm_debug_handle_oslar(vcpu, p->regval); return true; } @@ -621,43 +614,13 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, } } -/* - * We want to avoid world-switching all the DBG registers all the - * time: - * - * - If we've touched any debug register, it is likely that we're - * going to touch more of them. It then makes sense to disable the - * traps and start doing the save/restore dance - * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is - * then mandatory to save/restore the registers, as the guest - * depends on them. - * - * For this, we use a DIRTY bit, indicating the guest has modified the - * debug registers, used as follow: - * - * On guest entry: - * - If the dirty bit is set (because we're coming back from trapping), - * disable the traps, save host registers, restore guest registers. - * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), - * set the dirty bit, disable the traps, save host registers, - * restore guest registers. 
- * - Otherwise, enable the traps - * - * On guest exit: - * - If the dirty bit is set, save guest registers, restore host - * registers and clear the dirty bit. This ensure that the host can - * now use the debug registers. - */ static bool trap_debug_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { access_rw(vcpu, p, r); - if (p->is_write) - vcpu_set_flag(vcpu, DEBUG_DIRTY); - - trace_trap_reg(__func__, r->reg, p->is_write, p->regval); + kvm_debug_set_guest_ownership(vcpu); return true; } @@ -666,9 +629,6 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu, * * A 32 bit write to a debug register leave top bits alone * A 32 bit read from a debug register only returns the bottom bits - * - * All writes will set the DEBUG_DIRTY flag to ensure the hyp code - * switches between host and guest values in future. */ static void reg_to_dbg(struct kvm_vcpu *vcpu, struct sys_reg_params *p, @@ -683,8 +643,6 @@ static void reg_to_dbg(struct kvm_vcpu *vcpu, val &= ~mask; val |= (p->regval & (mask >> shift)) << shift; *dbg_reg = val; - - vcpu_set_flag(vcpu, DEBUG_DIRTY); } static void dbg_to_reg(struct kvm_vcpu *vcpu, @@ -698,152 +656,79 @@ static void dbg_to_reg(struct kvm_vcpu *vcpu, p->regval = (*dbg_reg & mask) >> shift; } -static bool trap_bvr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) -{ - u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm]; - - if (p->is_write) - reg_to_dbg(vcpu, p, rd, dbg_reg); - else - dbg_to_reg(vcpu, p, rd, dbg_reg); - - trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); - - return true; -} - -static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, - u64 val) +static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { - vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val; - return 0; -} + struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state; -static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, - u64 *val) -{ - *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm]; - return 0; + switch (rd->Op2) { + case 0b100: + return &dbg->dbg_bvr[rd->CRm]; + case 0b101: + return &dbg->dbg_bcr[rd->CRm]; + case 0b110: + return &dbg->dbg_wvr[rd->CRm]; + case 0b111: + return &dbg->dbg_wcr[rd->CRm]; + default: + KVM_BUG_ON(1, vcpu->kvm); + return NULL; + } } -static u64 reset_bvr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) +static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *rd) { - vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val; - return rd->val; -} + u64 *reg = demux_wb_reg(vcpu, rd); -static bool trap_bcr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) -{ - u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm]; + if (!reg) + return false; if (p->is_write) - reg_to_dbg(vcpu, p, rd, dbg_reg); + reg_to_dbg(vcpu, p, rd, reg); else - dbg_to_reg(vcpu, p, rd, dbg_reg); - - trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); + dbg_to_reg(vcpu, p, rd, reg); + kvm_debug_set_guest_ownership(vcpu); return true; } -static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, - u64 val) -{ - vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val; - return 0; -} - -static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, - u64 *val) -{ - *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm]; - return 0; -} - -static u64 reset_bcr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) -{ - 
vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val; - return rd->val; -} -static bool trap_wvr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + u64 val) { - u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]; - - if (p->is_write) - reg_to_dbg(vcpu, p, rd, dbg_reg); - else - dbg_to_reg(vcpu, p, rd, dbg_reg); + u64 *reg = demux_wb_reg(vcpu, rd); - trace_trap_reg(__func__, rd->CRm, p->is_write, - vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]); - - return true; -} - -static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, - u64 val) -{ - vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val; - return 0; -} + if (!reg) + return -EINVAL; -static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, - u64 *val) -{ - *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]; + *reg = val; return 0; } -static u64 reset_wvr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) -{ - vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val; - return rd->val; -} - -static bool trap_wcr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + u64 *val) { - u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm]; + u64 *reg = demux_wb_reg(vcpu, rd); - if (p->is_write) - reg_to_dbg(vcpu, p, rd, dbg_reg); - else - dbg_to_reg(vcpu, p, rd, dbg_reg); - - trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); - - return true; -} + if (!reg) + return -EINVAL; -static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, - u64 val) -{ - vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val; + *val = *reg; return 0; } -static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, - u64 *val) +static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { - *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm]; - return 0; -} + u64 *reg = demux_wb_reg(vcpu, rd); -static u64 reset_wcr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) -{ - vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val; + /* + * Bail early if we couldn't find storage for the register; the + * KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever + * being run.
+ */ + if (!reg) + return 0; + + *reg = rd->val; return rd->val; } @@ -1350,13 +1235,17 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ - trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \ + trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \ + get_dbg_wb_reg, set_dbg_wb_reg }, \ { SYS_DESC(SYS_DBGBCRn_EL1(n)), \ - trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \ + trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \ + get_dbg_wb_reg, set_dbg_wb_reg }, \ { SYS_DESC(SYS_DBGWVRn_EL1(n)), \ - trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \ + trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \ + get_dbg_wb_reg, set_dbg_wb_reg }, \ { SYS_DESC(SYS_DBGWCRn_EL1(n)), \ - trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr } + trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \ + get_dbg_wb_reg, set_dbg_wb_reg } #define PMU_SYS_REG(name) \ SYS_DESC(SYS_##name), .reset = reset_pmu_reg, \ @@ -1410,26 +1299,146 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, switch (reg) { case SYS_CNTP_TVAL_EL0: + if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) + tmr = TIMER_HPTIMER; + else + tmr = TIMER_PTIMER; + treg = TIMER_REG_TVAL; + break; + + case SYS_CNTV_TVAL_EL0: + if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) + tmr = TIMER_HVTIMER; + else + tmr = TIMER_VTIMER; + treg = TIMER_REG_TVAL; + break; + case SYS_AARCH32_CNTP_TVAL: + case SYS_CNTP_TVAL_EL02: tmr = TIMER_PTIMER; treg = TIMER_REG_TVAL; break; + + case SYS_CNTV_TVAL_EL02: + tmr = TIMER_VTIMER; + treg = TIMER_REG_TVAL; + break; + + case SYS_CNTHP_TVAL_EL2: + tmr = TIMER_HPTIMER; + treg = TIMER_REG_TVAL; + break; + + case SYS_CNTHV_TVAL_EL2: + tmr = TIMER_HVTIMER; + treg = TIMER_REG_TVAL; + break; + case SYS_CNTP_CTL_EL0: + if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) + tmr = TIMER_HPTIMER; + else + tmr = TIMER_PTIMER; + treg = TIMER_REG_CTL; + break; + + case SYS_CNTV_CTL_EL0: + if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) + tmr = TIMER_HVTIMER; + else + tmr = TIMER_VTIMER; + treg = TIMER_REG_CTL; + break; + case SYS_AARCH32_CNTP_CTL: + case SYS_CNTP_CTL_EL02: tmr = TIMER_PTIMER; treg = TIMER_REG_CTL; break; + + case SYS_CNTV_CTL_EL02: + tmr = TIMER_VTIMER; + treg = TIMER_REG_CTL; + break; + + case SYS_CNTHP_CTL_EL2: + tmr = TIMER_HPTIMER; + treg = TIMER_REG_CTL; + break; + + case SYS_CNTHV_CTL_EL2: + tmr = TIMER_HVTIMER; + treg = TIMER_REG_CTL; + break; + case SYS_CNTP_CVAL_EL0: + if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) + tmr = TIMER_HPTIMER; + else + tmr = TIMER_PTIMER; + treg = TIMER_REG_CVAL; + break; + + case SYS_CNTV_CVAL_EL0: + if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) + tmr = TIMER_HVTIMER; + else + tmr = TIMER_VTIMER; + treg = TIMER_REG_CVAL; + break; + case SYS_AARCH32_CNTP_CVAL: + case SYS_CNTP_CVAL_EL02: tmr = TIMER_PTIMER; treg = TIMER_REG_CVAL; break; + + case SYS_CNTV_CVAL_EL02: + tmr = TIMER_VTIMER; + treg = TIMER_REG_CVAL; + break; + + case SYS_CNTHP_CVAL_EL2: + tmr = TIMER_HPTIMER; + treg = TIMER_REG_CVAL; + break; + + case SYS_CNTHV_CVAL_EL2: + tmr = TIMER_HVTIMER; + treg = TIMER_REG_CVAL; + break; + case SYS_CNTPCT_EL0: case SYS_CNTPCTSS_EL0: + if (is_hyp_ctxt(vcpu)) + tmr = TIMER_HPTIMER; + else + tmr = TIMER_PTIMER; + treg = TIMER_REG_CNT; + break; + case SYS_AARCH32_CNTPCT: + case SYS_AARCH32_CNTPCTSS: tmr = TIMER_PTIMER; treg = TIMER_REG_CNT; break; + + case SYS_CNTVCT_EL0: + case SYS_CNTVCTSS_EL0: + if (is_hyp_ctxt(vcpu)) + tmr = TIMER_HVTIMER; + 
else + tmr = TIMER_VTIMER; + treg = TIMER_REG_CNT; + break; + + case SYS_AARCH32_CNTVCT: + case SYS_AARCH32_CNTVCTSS: + tmr = TIMER_VTIMER; + treg = TIMER_REG_CNT; + break; + default: print_sys_reg_msg(p, "%s", "Unhandled trapped timer register"); return undef_access(vcpu, p, r); @@ -1599,7 +1608,8 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, if (!vcpu_has_ptrauth(vcpu)) val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); - if (!cpus_have_final_cap(ARM64_HAS_WFXT)) + if (!cpus_have_final_cap(ARM64_HAS_WFXT) || + has_broken_cntvoff()) val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT); break; case SYS_ID_AA64ISAR3_EL1: @@ -1807,6 +1817,9 @@ static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val) /* Hide SPE from guests */ val &= ~ID_AA64DFR0_EL1_PMSVer_MASK; + /* Hide BRBE from guests */ + val &= ~ID_AA64DFR0_EL1_BRBE_MASK; + return val; } @@ -2924,11 +2937,17 @@ static const struct sys_reg_desc sys_reg_descs[] = { AMU_AMEVTYPER1_EL0(15), { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer }, + { SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer }, + { SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer }, + { SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer }, + { SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer }, + { SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer }, + /* PMEVCNTRn_EL0 */ PMU_PMEVCNTR_EL0(0), PMU_PMEVCNTR_EL0(1), @@ -3080,9 +3099,24 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0), EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0), + { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer }, + EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0), + EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0), + + { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer }, + EL2_REG(CNTHV_CTL_EL2, access_arch_timer, reset_val, 0), + EL2_REG(CNTHV_CVAL_EL2, access_arch_timer, reset_val, 0), { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 }, + { SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer }, + { SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer }, + { SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer }, + + { SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer }, + { SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer }, + { SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer }, + EL2_REG(SP_EL2, NULL, reset_unknown, 0), }; @@ -3594,18 +3628,20 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, * None of the other registers share their location, so treat them as * if they were 64bit. 
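All of the timer cases added above follow one selection rule: a vCPU in guest-hypervisor context is redirected to the EL2 timers, where the CNT{P,V}_{TVAL,CTL,CVAL}_EL0 accessors additionally require HCR_EL2.E2H, while the CNT{P,V}CT(SS)_EL0 counter reads redirect on hyp context alone. A condensed sketch of that rule (a simplification; the real switch also lists the _EL02, _EL2 and AArch32 aliases explicitly):

	#include <stdbool.h>

	enum arch_tmr { VTIMER, PTIMER, HVTIMER, HPTIMER };

	static enum arch_tmr pick_timer(bool hyp_ctxt, bool e2h,
					bool physical, bool counter_read)
	{
		/* Counter reads need only hyp context; TVAL/CTL/CVAL also need E2H. */
		bool use_el2 = counter_read ? hyp_ctxt : (hyp_ctxt && e2h);

		if (use_el2)
			return physical ? HPTIMER : HVTIMER;
		return physical ? PTIMER : VTIMER;
	}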
*/ -#define DBG_BCR_BVR_WCR_WVR(n) \ - /* DBGBVRn */ \ - { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \ - /* DBGBCRn */ \ - { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \ - /* DBGWVRn */ \ - { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \ - /* DBGWCRn */ \ - { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n } - -#define DBGBXVR(n) \ - { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n } +#define DBG_BCR_BVR_WCR_WVR(n) \ + /* DBGBVRn */ \ + { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \ + trap_dbg_wb_reg, NULL, n }, \ + /* DBGBCRn */ \ + { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \ + /* DBGWVRn */ \ + { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \ + /* DBGWCRn */ \ + { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n } + +#define DBGBXVR(n) \ + { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \ + trap_dbg_wb_reg, NULL, n } /* * Trapped cp14 registers. We generally ignore most of the external @@ -3902,9 +3938,11 @@ static const struct sys_reg_desc cp15_64_regs[] = { { SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer }, { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 }, { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */ + { SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer }, { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */ { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer }, { SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer }, + { SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer }, }; static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n, @@ -4419,6 +4457,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) reset_vcpu_ftr_id_reg(vcpu, r); else r->reset(vcpu, r); + + if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS) + (void)__vcpu_sys_reg(vcpu, r->reg); } set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); @@ -4995,6 +5036,14 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu) kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 | HAFGRTR_EL2_RES1); + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) { + kvm->arch.fgu[HDFGRTR_GROUP] |= (HDFGRTR_EL2_nBRBDATA | + HDFGRTR_EL2_nBRBCTL | + HDFGRTR_EL2_nBRBIDR); + kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_nBRBINJ | + HFGITR_EL2_nBRBIALL); + } + set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags); out: mutex_unlock(&kvm->arch.config_lock); @@ -5022,7 +5071,7 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu) } if (vcpu_has_nv(vcpu)) { - int ret = kvm_init_nv_sysregs(kvm); + int ret = kvm_init_nv_sysregs(vcpu); if (ret) return ret; } diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h index 064a58c19f48..f85415db7713 100644 --- a/arch/arm64/kvm/trace_handle_exit.h +++ b/arch/arm64/kvm/trace_handle_exit.h @@ -46,38 +46,6 @@ TRACE_EVENT(kvm_hvc_arm64, __entry->vcpu_pc, __entry->r0, __entry->imm) ); -TRACE_EVENT(kvm_arm_setup_debug, - TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug), - TP_ARGS(vcpu, guest_debug), - - TP_STRUCT__entry( - __field(struct kvm_vcpu *, vcpu) - __field(__u32, guest_debug) - ), - - TP_fast_assign( - __entry->vcpu = vcpu; - __entry->guest_debug = guest_debug; - ), - - TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug) -); - -TRACE_EVENT(kvm_arm_clear_debug, - TP_PROTO(__u32 guest_debug), - TP_ARGS(guest_debug), - - TP_STRUCT__entry( - __field(__u32, guest_debug) - ), - - TP_fast_assign( - __entry->guest_debug = 
guest_debug; - ), - - TP_printk("flags: 0x%08x", __entry->guest_debug) -); - /* * The dreg32 name is a leftover from a distant past. This will really * output a 64bit value... @@ -99,49 +67,6 @@ TRACE_EVENT(kvm_arm_set_dreg32, TP_printk("%s: 0x%llx", __entry->name, __entry->value) ); -TRACE_DEFINE_SIZEOF(__u64); - -TRACE_EVENT(kvm_arm_set_regset, - TP_PROTO(const char *type, int len, __u64 *control, __u64 *value), - TP_ARGS(type, len, control, value), - TP_STRUCT__entry( - __field(const char *, name) - __field(int, len) - __array(u64, ctrls, 16) - __array(u64, values, 16) - ), - TP_fast_assign( - __entry->name = type; - __entry->len = len; - memcpy(__entry->ctrls, control, len << 3); - memcpy(__entry->values, value, len << 3); - ), - TP_printk("%d %s CTRL:%s VALUE:%s", __entry->len, __entry->name, - __print_array(__entry->ctrls, __entry->len, sizeof(__u64)), - __print_array(__entry->values, __entry->len, sizeof(__u64))) -); - -TRACE_EVENT(trap_reg, - TP_PROTO(const char *fn, int reg, bool is_write, u64 write_value), - TP_ARGS(fn, reg, is_write, write_value), - - TP_STRUCT__entry( - __field(const char *, fn) - __field(int, reg) - __field(bool, is_write) - __field(u64, write_value) - ), - - TP_fast_assign( - __entry->fn = fn; - __entry->reg = reg; - __entry->is_write = is_write; - __entry->write_value = write_value; - ), - - TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value) -); - TRACE_EVENT(kvm_handle_sys_reg, TP_PROTO(unsigned long hsr), TP_ARGS(hsr), diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index f267bc2486a1..d7233ab982d0 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -6,6 +6,7 @@ #include <linux/kstrtox.h> #include <linux/kvm.h> #include <linux/kvm_host.h> +#include <linux/string_choices.h> #include <kvm/arm_vgic.h> #include <asm/kvm_hyp.h> #include <asm/kvm_mmu.h> @@ -663,9 +664,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info) if (info->has_v4) { kvm_vgic_global_state.has_gicv4 = gicv4_enable; kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable; - kvm_info("GICv4%s support %sabled\n", + kvm_info("GICv4%s support %s\n", kvm_vgic_global_state.has_gicv4_1 ? ".1" : "", - gicv4_enable ? 
"en" : "dis"); + str_enabled_disabled(gicv4_enable)); } kvm_vgic_global_state.vcpu_base = 0; @@ -734,7 +735,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; - kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if); + if (likely(!is_protected_kvm_enabled())) + kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if); if (has_vhe()) __vgic_v3_activate_traps(cpu_if); @@ -746,7 +748,8 @@ void vgic_v3_put(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; - kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if); + if (likely(!is_protected_kvm_enabled())) + kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if); WARN_ON(vgic_v4_put(vcpu)); if (has_vhe()) diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index eb17f59e543c..1e65f2fb45bd 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -105,6 +105,7 @@ WORKAROUND_CLEAN_CACHE WORKAROUND_DEVICE_LOAD_ACQUIRE WORKAROUND_NVIDIA_CARMEL_CNP WORKAROUND_QCOM_FALKOR_E1003 +WORKAROUND_QCOM_ORYON_CNTVOFF WORKAROUND_REPEAT_TLBI WORKAROUND_SPECULATIVE_AT WORKAROUND_SPECULATIVE_SSBS diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 94ca9cdb0b16..762ee084b37c 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -2064,6 +2064,18 @@ Field 17:16 ZEN Res0 15:0 EndSysreg +Sysreg TRFCR_EL1 3 0 1 2 1 +Res0 63:7 +UnsignedEnum 6:5 TS + 0b0001 VIRTUAL + 0b0010 GUEST_PHYSICAL + 0b0011 PHYSICAL +EndEnum +Res0 4:2 +Field 1 ExTRE +Field 0 E0TRE +EndSysregFields + Sysreg SMPRI_EL1 3 0 1 2 4 Res0 63:4 Field 3:0 PRIORITY @@ -2613,6 +2625,22 @@ Field 1 ICIALLU Field 0 ICIALLUIS EndSysreg +Sysreg TRFCR_EL2 3 4 1 2 1 +Res0 63:7 +UnsignedEnum 6:5 TS + 0b0000 USE_TRFCR_EL1_TS + 0b0001 VIRTUAL + 0b0010 GUEST_PHYSICAL + 0b0011 PHYSICAL +EndEnum +Res0 4 +Field 3 CX +Res0 2 +Field 1 E2TRE +Field 0 E0HTRE +EndSysreg + + Sysreg HDFGRTR_EL2 3 4 3 1 4 Field 63 PMBIDR_EL1 Field 62 nPMSNEVFR_EL1 @@ -3023,6 +3051,10 @@ Sysreg ZCR_EL12 3 5 1 2 0 Mapping ZCR_EL1 EndSysreg +Sysreg TRFCR_EL12 3 5 1 2 1 +Mapping TRFCR_EL1 +EndSysreg + Sysreg SMCR_EL12 3 5 1 2 6 Mapping SMCR_EL1 EndSysreg diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild index bfa21465d83a..beb8499dd8ed 100644 --- a/arch/loongarch/Kbuild +++ b/arch/loongarch/Kbuild @@ -4,7 +4,6 @@ obj-y += net/ obj-y += vdso/ obj-$(CONFIG_KVM) += kvm/ -obj-$(CONFIG_BUILTIN_DTB) += boot/dts/ # for cleaning subdir- += boot diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 22b531f45ffa..2b8bd27a852f 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -249,7 +249,7 @@ config MACH_LOONGSON64 def_bool 64BIT config FIX_EARLYCON_MEM - def_bool y + def_bool !ARCH_IOREMAP config PGTABLE_2LEVEL bool @@ -400,6 +400,7 @@ endchoice config BUILTIN_DTB bool "Enable built-in dtb in kernel" depends on OF + select GENERIC_BUILTIN_DTB help Some existing systems do not provide a canonical device tree to the kernel at boot time. 
Let's provide a device tree table in the diff --git a/arch/loongarch/boot/dts/Makefile b/arch/loongarch/boot/dts/Makefile index 747d0c3f6389..15d5e14fe418 100644 --- a/arch/loongarch/boot/dts/Makefile +++ b/arch/loongarch/boot/dts/Makefile @@ -1,5 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only dtb-y = loongson-2k0500-ref.dtb loongson-2k1000-ref.dtb loongson-2k2000-ref.dtb - -obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_NAME)) diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h index 900589cb159d..35e0a230a484 100644 --- a/arch/loongarch/include/asm/cpu-info.h +++ b/arch/loongarch/include/asm/cpu-info.h @@ -57,6 +57,7 @@ struct cpuinfo_loongarch { int global_id; /* physical global thread number */ int vabits; /* Virtual Address size in bits */ int pabits; /* Physical Address size in bits */ + int timerbits; /* Width of arch timer in bits */ unsigned int ksave_mask; /* Usable KSave mask. */ unsigned int watch_dreg_count; /* Number data breakpoints */ unsigned int watch_ireg_count; /* Number instruction breakpoints */ diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h index d78330916bd1..13b2462f3d8c 100644 --- a/arch/loongarch/include/asm/hw_breakpoint.h +++ b/arch/loongarch/include/asm/hw_breakpoint.h @@ -38,8 +38,8 @@ struct arch_hw_breakpoint { * Limits. * Changing these will require modifications to the register accessors. */ -#define LOONGARCH_MAX_BRP 8 -#define LOONGARCH_MAX_WRP 8 +#define LOONGARCH_MAX_BRP 14 +#define LOONGARCH_MAX_WRP 14 /* Virtual debug register bases. */ #define CSR_CFG_ADDR 0 diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 64ad277e096e..52651aa0e583 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -108,6 +108,12 @@ #define CPUCFG3_SPW_HG_HF BIT(11) #define CPUCFG3_RVA BIT(12) #define CPUCFG3_RVAMAX GENMASK(16, 13) +#define CPUCFG3_ALDORDER_CAP BIT(18) /* All address load ordered, capability */ +#define CPUCFG3_ASTORDER_CAP BIT(19) /* All address store ordered, capability */ +#define CPUCFG3_ALDORDER_STA BIT(20) /* All address load ordered, status */ +#define CPUCFG3_ASTORDER_STA BIT(21) /* All address store ordered, status */ +#define CPUCFG3_SLDORDER_CAP BIT(22) /* Same address load ordered, capability */ +#define CPUCFG3_SLDORDER_STA BIT(23) /* Same address load ordered, status */ #define LOONGARCH_CPUCFG4 0x4 #define CPUCFG4_CCFREQ GENMASK(31, 0) @@ -466,7 +472,6 @@ #define LOONGARCH_CSR_TCFG 0x41 /* Timer config */ #define CSR_TCFG_VAL_SHIFT 2 -#define CSR_TCFG_VAL_WIDTH 48 #define CSR_TCFG_VAL (_ULCAST_(0x3fffffffffff) << CSR_TCFG_VAL_SHIFT) #define CSR_TCFG_PERIOD_SHIFT 1 #define CSR_TCFG_PERIOD (_ULCAST_(0x1) << CSR_TCFG_PERIOD_SHIFT) @@ -566,6 +571,15 @@ /* Implement dependent */ #define LOONGARCH_CSR_IMPCTL1 0x80 /* Loongson config1 */ +#define CSR_LDSTORDER_SHIFT 28 +#define CSR_LDSTORDER_WIDTH 3 +#define CSR_LDSTORDER_MASK (_ULCAST_(0x7) << CSR_LDSTORDER_SHIFT) +#define CSR_LDSTORDER_NLD_NST (_ULCAST_(0x0) << CSR_LDSTORDER_SHIFT) /* 000 = No Load No Store */ +#define CSR_LDSTORDER_ALD_NST (_ULCAST_(0x1) << CSR_LDSTORDER_SHIFT) /* 001 = All Load No Store */ +#define CSR_LDSTORDER_SLD_NST (_ULCAST_(0x3) << CSR_LDSTORDER_SHIFT) /* 011 = Same Load No Store */ +#define CSR_LDSTORDER_NLD_AST (_ULCAST_(0x4) << CSR_LDSTORDER_SHIFT) /* 100 = No Load All Store */ +#define CSR_LDSTORDER_ALD_AST (_ULCAST_(0x5) << CSR_LDSTORDER_SHIFT) /* 101 = All Load 
All Store */ +#define CSR_LDSTORDER_SLD_AST (_ULCAST_(0x7) << CSR_LDSTORDER_SHIFT) /* 111 = Same Load All Store */ #define CSR_MISPEC_SHIFT 20 #define CSR_MISPEC_WIDTH 8 #define CSR_MISPEC (_ULCAST_(0xff) << CSR_MISPEC_SHIFT) @@ -959,6 +973,36 @@ #define LOONGARCH_CSR_DB7CTRL 0x34a /* data breakpoint 7 control */ #define LOONGARCH_CSR_DB7ASID 0x34b /* data breakpoint 7 asid */ +#define LOONGARCH_CSR_DB8ADDR 0x350 /* data breakpoint 8 address */ +#define LOONGARCH_CSR_DB8MASK 0x351 /* data breakpoint 8 mask */ +#define LOONGARCH_CSR_DB8CTRL 0x352 /* data breakpoint 8 control */ +#define LOONGARCH_CSR_DB8ASID 0x353 /* data breakpoint 8 asid */ + +#define LOONGARCH_CSR_DB9ADDR 0x358 /* data breakpoint 9 address */ +#define LOONGARCH_CSR_DB9MASK 0x359 /* data breakpoint 9 mask */ +#define LOONGARCH_CSR_DB9CTRL 0x35a /* data breakpoint 9 control */ +#define LOONGARCH_CSR_DB9ASID 0x35b /* data breakpoint 9 asid */ + +#define LOONGARCH_CSR_DB10ADDR 0x360 /* data breakpoint 10 address */ +#define LOONGARCH_CSR_DB10MASK 0x361 /* data breakpoint 10 mask */ +#define LOONGARCH_CSR_DB10CTRL 0x362 /* data breakpoint 10 control */ +#define LOONGARCH_CSR_DB10ASID 0x363 /* data breakpoint 10 asid */ + +#define LOONGARCH_CSR_DB11ADDR 0x368 /* data breakpoint 11 address */ +#define LOONGARCH_CSR_DB11MASK 0x369 /* data breakpoint 11 mask */ +#define LOONGARCH_CSR_DB11CTRL 0x36a /* data breakpoint 11 control */ +#define LOONGARCH_CSR_DB11ASID 0x36b /* data breakpoint 11 asid */ + +#define LOONGARCH_CSR_DB12ADDR 0x370 /* data breakpoint 12 address */ +#define LOONGARCH_CSR_DB12MASK 0x371 /* data breakpoint 12 mask */ +#define LOONGARCH_CSR_DB12CTRL 0x372 /* data breakpoint 12 control */ +#define LOONGARCH_CSR_DB12ASID 0x373 /* data breakpoint 12 asid */ + +#define LOONGARCH_CSR_DB13ADDR 0x378 /* data breakpoint 13 address */ +#define LOONGARCH_CSR_DB13MASK 0x379 /* data breakpoint 13 mask */ +#define LOONGARCH_CSR_DB13CTRL 0x37a /* data breakpoint 13 control */ +#define LOONGARCH_CSR_DB13ASID 0x37b /* data breakpoint 13 asid */ + #define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */ #define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */ @@ -1002,6 +1046,36 @@ #define LOONGARCH_CSR_IB7CTRL 0x3ca /* inst breakpoint 7 control */ #define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */ +#define LOONGARCH_CSR_IB8ADDR 0x3d0 /* inst breakpoint 8 address */ +#define LOONGARCH_CSR_IB8MASK 0x3d1 /* inst breakpoint 8 mask */ +#define LOONGARCH_CSR_IB8CTRL 0x3d2 /* inst breakpoint 8 control */ +#define LOONGARCH_CSR_IB8ASID 0x3d3 /* inst breakpoint 8 asid */ + +#define LOONGARCH_CSR_IB9ADDR 0x3d8 /* inst breakpoint 9 address */ +#define LOONGARCH_CSR_IB9MASK 0x3d9 /* inst breakpoint 9 mask */ +#define LOONGARCH_CSR_IB9CTRL 0x3da /* inst breakpoint 9 control */ +#define LOONGARCH_CSR_IB9ASID 0x3db /* inst breakpoint 9 asid */ + +#define LOONGARCH_CSR_IB10ADDR 0x3e0 /* inst breakpoint 10 address */ +#define LOONGARCH_CSR_IB10MASK 0x3e1 /* inst breakpoint 10 mask */ +#define LOONGARCH_CSR_IB10CTRL 0x3e2 /* inst breakpoint 10 control */ +#define LOONGARCH_CSR_IB10ASID 0x3e3 /* inst breakpoint 10 asid */ + +#define LOONGARCH_CSR_IB11ADDR 0x3e8 /* inst breakpoint 11 address */ +#define LOONGARCH_CSR_IB11MASK 0x3e9 /* inst breakpoint 11 mask */ +#define LOONGARCH_CSR_IB11CTRL 0x3ea /* inst breakpoint 11 control */ +#define LOONGARCH_CSR_IB11ASID 0x3eb /* inst breakpoint 11 asid */ + +#define LOONGARCH_CSR_IB12ADDR 0x3f0 /* inst breakpoint 12 address */ +#define LOONGARCH_CSR_IB12MASK 0x3f1 /* 
inst breakpoint 12 mask */ +#define LOONGARCH_CSR_IB12CTRL 0x3f2 /* inst breakpoint 12 control */ +#define LOONGARCH_CSR_IB12ASID 0x3f3 /* inst breakpoint 12 asid */ + +#define LOONGARCH_CSR_IB13ADDR 0x3f8 /* inst breakpoint 13 address */ +#define LOONGARCH_CSR_IB13MASK 0x3f9 /* inst breakpoint 13 mask */ +#define LOONGARCH_CSR_IB13CTRL 0x3fa /* inst breakpoint 13 control */ +#define LOONGARCH_CSR_IB13ASID 0x3fb /* inst breakpoint 13 asid */ + #define LOONGARCH_CSR_DEBUG 0x500 /* debug config */ #define LOONGARCH_CSR_DERA 0x501 /* debug era */ #define LOONGARCH_CSR_DESAVE 0x502 /* debug save */ diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h index 82cd3a9f094b..45bfc65a0c9f 100644 --- a/arch/loongarch/include/asm/pgtable-bits.h +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -96,6 +96,13 @@ #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC) +#define pgprot_nx pgprot_nx + +static inline pgprot_t pgprot_nx(pgprot_t _prot) +{ + return __pgprot(pgprot_val(_prot) | _PAGE_NO_EXEC); +} + #define pgprot_noncached pgprot_noncached static inline pgprot_t pgprot_noncached(pgprot_t _prot) diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h index ac915f841650..aafb3cd9e943 100644 --- a/arch/loongarch/include/uapi/asm/ptrace.h +++ b/arch/loongarch/include/uapi/asm/ptrace.h @@ -72,6 +72,16 @@ struct user_watch_state { } dbg_regs[8]; }; +struct user_watch_state_v2 { + uint64_t dbg_info; + struct { + uint64_t addr; + uint64_t mask; + uint32_t ctrl; + uint32_t pad; + } dbg_regs[14]; +}; + #define PTRACE_SYSEMU 0x1f #define PTRACE_SYSEMU_SINGLESTEP 0x20 diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 9497968ee158..4853e8b04c6f 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -10,7 +10,7 @@ extra-y := vmlinux.lds obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ traps.o irq.o idle.o process.o dma.o mem.o reset.o switch.o \ elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \ - alternative.o unwind.o + alternative.o kdebugfs.o unwind.o obj-$(CONFIG_ACPI) += acpi.o obj-$(CONFIG_EFI) += efi.o diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c index c7988f757281..8e231b0d2cd6 100644 --- a/arch/loongarch/kernel/cacheinfo.c +++ b/arch/loongarch/kernel/cacheinfo.c @@ -51,6 +51,12 @@ static void cache_cpumap_setup(unsigned int cpu) continue; sib_leaf = sib_cpu_ci->info_list + index; + /* SMT cores share all caches */ + if (cpus_are_siblings(i, cpu)) { + cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); + cpumask_set_cpu(i, &this_leaf->shared_cpu_map); + } + /* Node's cores share shared caches */ if (cache_leaves_are_shared(this_leaf, sib_leaf)) { cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_set_cpu(i, &this_leaf->shared_cpu_map); diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index cbce099037b2..fedaa67cde41 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -190,6 +190,7 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) set_cpu_asid_mask(c, asid_mask); config = read_csr_prcfg1(); + c->timerbits = (config & CSR_CONF1_TMRBITS) >> CSR_CONF1_TMRBITS_SHIFT; c->ksave_mask = GENMASK((config & CSR_CONF1_KSNUM) - 1, 0); c->ksave_mask &= ~(EXC_KSAVE_MASK | PERCPU_KSAVE_MASK | KVM_KSAVE_MASK); diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c index 
a6e4b605bfa8..c35f9bf38033 100644 --- a/arch/loongarch/kernel/hw_breakpoint.c +++ b/arch/loongarch/kernel/hw_breakpoint.c @@ -51,7 +51,13 @@ int hw_breakpoint_slots(int type) READ_WB_REG_CASE(OFF, 4, REG, T, VAL); \ READ_WB_REG_CASE(OFF, 5, REG, T, VAL); \ READ_WB_REG_CASE(OFF, 6, REG, T, VAL); \ - READ_WB_REG_CASE(OFF, 7, REG, T, VAL); + READ_WB_REG_CASE(OFF, 7, REG, T, VAL); \ + READ_WB_REG_CASE(OFF, 8, REG, T, VAL); \ + READ_WB_REG_CASE(OFF, 9, REG, T, VAL); \ + READ_WB_REG_CASE(OFF, 10, REG, T, VAL); \ + READ_WB_REG_CASE(OFF, 11, REG, T, VAL); \ + READ_WB_REG_CASE(OFF, 12, REG, T, VAL); \ + READ_WB_REG_CASE(OFF, 13, REG, T, VAL); #define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL) \ WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL); \ @@ -61,7 +67,13 @@ int hw_breakpoint_slots(int type) WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL); \ WRITE_WB_REG_CASE(OFF, 5, REG, T, VAL); \ WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL); \ - WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL); + WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL); \ + WRITE_WB_REG_CASE(OFF, 8, REG, T, VAL); \ + WRITE_WB_REG_CASE(OFF, 9, REG, T, VAL); \ + WRITE_WB_REG_CASE(OFF, 10, REG, T, VAL); \ + WRITE_WB_REG_CASE(OFF, 11, REG, T, VAL); \ + WRITE_WB_REG_CASE(OFF, 12, REG, T, VAL); \ + WRITE_WB_REG_CASE(OFF, 13, REG, T, VAL); static u64 read_wb_reg(int reg, int n, int t) { diff --git a/arch/loongarch/kernel/kdebugfs.c b/arch/loongarch/kernel/kdebugfs.c new file mode 100644 index 000000000000..80cf64772399 --- /dev/null +++ b/arch/loongarch/kernel/kdebugfs.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/init.h> +#include <linux/export.h> +#include <linux/debugfs.h> +#include <linux/kstrtox.h> +#include <asm/loongarch.h> + +struct dentry *arch_debugfs_dir; +EXPORT_SYMBOL(arch_debugfs_dir); + +static int sfb_state, tso_state; + +static void set_sfb_state(void *info) +{ + int val = *(int *)info << CSR_STFILL_SHIFT; + + csr_xchg32(val, CSR_STFILL, LOONGARCH_CSR_IMPCTL1); +} + +static ssize_t sfb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + int s, state; + char str[32]; + + state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_STFILL) >> CSR_STFILL_SHIFT; + + s = snprintf(str, sizeof(str), "Boot State: %x\nCurrent State: %x\n", sfb_state, state); + + if (*ppos >= s) + return 0; + + s -= *ppos; + s = min_t(u32, s, count); + + if (copy_to_user(buf, &str[*ppos], s)) + return -EFAULT; + + *ppos += s; + + return s; +} + +static ssize_t sfb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + int state; + + if (kstrtoint_from_user(buf, count, 10, &state)) + return -EFAULT; + + switch (state) { + case 0: case 1: + on_each_cpu(set_sfb_state, &state, 1); + break; + default: + return -EINVAL; + } + + return count; +} + +static const struct file_operations sfb_fops = { + .read = sfb_read, + .write = sfb_write, + .open = simple_open, + .llseek = default_llseek +}; + +#define LDSTORDER_NLD_NST 0x0 /* 000 = No Load No Store */ +#define LDSTORDER_ALD_NST 0x1 /* 001 = All Load No Store */ +#define LDSTORDER_SLD_NST 0x3 /* 011 = Same Load No Store */ +#define LDSTORDER_NLD_AST 0x4 /* 100 = No Load All Store */ +#define LDSTORDER_ALD_AST 0x5 /* 101 = All Load All Store */ +#define LDSTORDER_SLD_AST 0x7 /* 111 = Same Load All Store */ + +static char *tso_hints[] = { + "No Load No Store", + "All Load No Store", + "Invalid Config", + "Same Load No Store", + "No Load All Store", + "All Load All Store", + "Invalid Config", + "Same Load All Store" +}; + +static void set_tso_state(void *info) +{ + int val = *(int *)info 
<< CSR_LDSTORDER_SHIFT; + + csr_xchg32(val, CSR_LDSTORDER_MASK, LOONGARCH_CSR_IMPCTL1); +} + +static ssize_t tso_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + int s, state; + char str[240]; + + state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_LDSTORDER_MASK) >> CSR_LDSTORDER_SHIFT; + + s = snprintf(str, sizeof(str), "Boot State: %d (%s)\n" + "Current State: %d (%s)\n\n" + "Available States:\n" + "0 (%s)\t" "1 (%s)\t" "3 (%s)\n" + "4 (%s)\t" "5 (%s)\t" "7 (%s)\n", + tso_state, tso_hints[tso_state], state, tso_hints[state], + tso_hints[0], tso_hints[1], tso_hints[3], tso_hints[4], tso_hints[5], tso_hints[7]); + + if (*ppos >= s) + return 0; + + s -= *ppos; + s = min_t(u32, s, count); + + if (copy_to_user(buf, &str[*ppos], s)) + return -EFAULT; + + *ppos += s; + + return s; +} + +static ssize_t tso_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + int state; + + if (kstrtoint_from_user(buf, count, 10, &state)) + return -EFAULT; + + switch (state) { + case 0: case 1: case 3: + case 4: case 5: case 7: + on_each_cpu(set_tso_state, &state, 1); + break; + default: + return -EINVAL; + } + + return count; +} + +static const struct file_operations tso_fops = { + .read = tso_read, + .write = tso_write, + .open = simple_open, + .llseek = default_llseek +}; + +static int __init arch_kdebugfs_init(void) +{ + unsigned int config = read_cpucfg(LOONGARCH_CPUCFG3); + + arch_debugfs_dir = debugfs_create_dir("loongarch", NULL); + + if (config & CPUCFG3_SFB) { + debugfs_create_file("sfb_state", S_IRUGO | S_IWUSR, + arch_debugfs_dir, &sfb_state, &sfb_fops); + sfb_state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_STFILL) >> CSR_STFILL_SHIFT; + } + + if (config & (CPUCFG3_ALDORDER_CAP | CPUCFG3_ASTORDER_CAP)) { + debugfs_create_file("tso_state", S_IRUGO | S_IWUSR, + arch_debugfs_dir, &tso_state, &tso_fops); + tso_state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_LDSTORDER_MASK) >> CSR_LDSTORDER_SHIFT; + } + + return 0; +} +postcore_initcall(arch_kdebugfs_init); diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index 19dc6eff45cc..5e2402cfcab0 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -720,7 +720,7 @@ static int hw_break_set(struct task_struct *target, unsigned int note_type = regset->core_note_type; /* Resource info */ - offset = offsetof(struct user_watch_state, dbg_regs); + offset = offsetof(struct user_watch_state_v2, dbg_regs); user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset); /* (address, mask, ctrl) registers */ @@ -920,7 +920,7 @@ static const struct user_regset loongarch64_regsets[] = { #ifdef CONFIG_HAVE_HW_BREAKPOINT [REGSET_HW_BREAK] = { .core_note_type = NT_LOONGARCH_HW_BREAK, - .n = sizeof(struct user_watch_state) / sizeof(u32), + .n = sizeof(struct user_watch_state_v2) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .regset_get = hw_break_get, @@ -928,7 +928,7 @@ static const struct user_regset loongarch64_regsets[] = { }, [REGSET_HW_WATCH] = { .core_note_type = NT_LOONGARCH_HW_WATCH, - .n = sizeof(struct user_watch_state) / sizeof(u32), + .n = sizeof(struct user_watch_state_v2) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .regset_get = hw_break_get, diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S index 31dd8199b245..9c23cb7e432f 100644 --- a/arch/loongarch/kernel/switch.S +++ b/arch/loongarch/kernel/switch.S @@ -12,7 +12,7 @@ /* * task_struct *__switch_to(task_struct *prev, task_struct *next, - * struct 
thread_info *next_ti) + * struct thread_info *next_ti, void *sched_ra, void *sched_cfa) */ .align 5 SYM_FUNC_START(__switch_to) diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index a07d7eff4dc5..e2d3bfeb6366 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -132,7 +132,7 @@ int constant_clockevent_init(void) #else unsigned long min_delta = 1000; #endif - unsigned long max_delta = (1UL << 48) - 1; + unsigned long max_delta = GENMASK_ULL(boot_cpu_data.timerbits, 0); struct clock_event_device *cd; static int irq = 0, timer_irq_installed = 0; diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index c57b4134f3e8..2ec3106c0da3 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -597,17 +597,24 @@ int is_valid_bugaddr(unsigned long addr) static void bug_handler(struct pt_regs *regs) { + if (user_mode(regs)) { + force_sig(SIGTRAP); + return; + } + switch (report_bug(regs->csr_era, regs)) { case BUG_TRAP_TYPE_BUG: - case BUG_TRAP_TYPE_NONE: - die_if_kernel("Oops - BUG", regs); - force_sig(SIGTRAP); + die("Oops - BUG", regs); break; case BUG_TRAP_TYPE_WARN: /* Skip the BUG instruction and continue */ regs->csr_era += LOONGARCH_INSN_SIZE; break; + + default: + if (!fixup_exception(regs)) + die("Oops - BUG", regs); } } diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c index 3abf163dda05..487be604b96a 100644 --- a/arch/loongarch/kernel/unaligned.c +++ b/arch/loongarch/kernel/unaligned.c @@ -482,14 +482,10 @@ sigbus: #ifdef CONFIG_DEBUG_FS static int __init debugfs_unaligned(void) { - struct dentry *d; - - d = debugfs_create_dir("loongarch", NULL); - debugfs_create_u32("unaligned_instructions_user", - S_IRUGO, d, &unaligned_instructions_user); + S_IRUGO, arch_debugfs_dir, &unaligned_instructions_user); debugfs_create_u32("unaligned_instructions_kernel", - S_IRUGO, d, &unaligned_instructions_kernel); + S_IRUGO, arch_debugfs_dir, &unaligned_instructions_kernel); return 0; } diff --git a/arch/loongarch/power/platform.c b/arch/loongarch/power/platform.c index 0909729dc2e1..5bbdb9fd76e5 100644 --- a/arch/loongarch/power/platform.c +++ b/arch/loongarch/power/platform.c @@ -17,7 +17,7 @@ void enable_gpe_wakeup(void) if (acpi_gbl_reduced_hardware) return; - acpi_enable_all_wakeup_gpes(); + acpi_hw_enable_all_wakeup_gpes(); } void enable_pci_wakeup(void) diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S index 9fe28d5a0270..df0865df26fa 100644 --- a/arch/loongarch/power/suspend_asm.S +++ b/arch/loongarch/power/suspend_asm.S @@ -30,9 +30,6 @@ st.d $r29, sp, PT_R29 st.d $r30, sp, PT_R30 st.d $r31, sp, PT_R31 - - la.pcrel t0, acpi_saved_sp - st.d sp, t0, 0 .endm .macro SETUP_WAKEUP @@ -51,6 +48,7 @@ ld.d $r29, sp, PT_R29 ld.d $r30, sp, PT_R30 ld.d $r31, sp, PT_R31 + addi.d sp, sp, PT_SIZE .endm .text @@ -59,6 +57,10 @@ /* Sleep/wakeup code for Loongson-3 */ SYM_FUNC_START(loongarch_suspend_enter) SETUP_SLEEP + + la.pcrel t0, acpi_saved_sp + st.d sp, t0, 0 + bl __flush_cache_all /* Pass RA and SP to BIOS */ @@ -82,7 +84,7 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL) la.pcrel t0, acpi_saved_sp ld.d sp, t0, 0 + SETUP_WAKEUP - addi.d sp, sp, PT_SIZE jr ra SYM_FUNC_END(loongarch_suspend_enter) diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 09bd93464b4f..9ec265fcaff4 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -815,7 +815,7 @@ static 
int opal_add_one_export(struct kobject *parent, const char *export_name, sysfs_bin_attr_init(attr); attr->attr.name = name; attr->attr.mode = 0400; - attr->read = sysfs_bin_attr_simple_read; + attr->read_new = sysfs_bin_attr_simple_read; attr->private = __va(vals[0]); attr->size = vals[1]; diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c index 8605dd710f3c..5210991429d5 100644 --- a/arch/sparc/kernel/irq_32.c +++ b/arch/sparc/kernel/irq_32.c @@ -199,18 +199,18 @@ int arch_show_interrupts(struct seq_file *p, int prec) int j; #ifdef CONFIG_SMP - seq_printf(p, "RES: "); + seq_printf(p, "RES:"); for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_data(j).irq_resched_count); + seq_put_decimal_ull_width(p, " ", cpu_data(j).irq_resched_count, 10); seq_printf(p, " IPI rescheduling interrupts\n"); - seq_printf(p, "CAL: "); + seq_printf(p, "CAL:"); for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_data(j).irq_call_count); + seq_put_decimal_ull_width(p, " ", cpu_data(j).irq_call_count, 10); seq_printf(p, " IPI function call interrupts\n"); #endif - seq_printf(p, "NMI: "); + seq_printf(p, "NMI:"); for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_data(j).counter); + seq_put_decimal_ull_width(p, " ", cpu_data(j).counter, 10); seq_printf(p, " Non-maskable interrupts\n"); return 0; } diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 01ee800efde3..ded463c82abd 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -22,6 +22,7 @@ #include <linux/seq_file.h> #include <linux/ftrace.h> #include <linux/irq.h> +#include <linux/string_choices.h> #include <asm/ptrace.h> #include <asm/processor.h> @@ -145,9 +146,7 @@ static int hv_irq_version; */ static bool sun4v_cookie_only_virqs(void) { - if (hv_irq_version >= 3) - return true; - return false; + return hv_irq_version >= 3; } static void __init irq_init_hv(void) @@ -170,7 +169,7 @@ static void __init irq_init_hv(void) pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n", hv_irq_version, - sun4v_cookie_only_virqs() ? 
"enabled" : "disabled"); + str_enabled_disabled(sun4v_cookie_only_virqs())); } /* This function is for the timer interrupt.*/ @@ -304,9 +303,9 @@ int arch_show_interrupts(struct seq_file *p, int prec) { int j; - seq_printf(p, "NMI: "); + seq_printf(p, "NMI:"); for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_data(j).__nmi_count); + seq_put_decimal_ull_width(p, " ", cpu_data(j).__nmi_count, 10); seq_printf(p, " Non-maskable interrupts\n"); return 0; } diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 50a0927a84a6..ddac216a2aff 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -932,7 +932,7 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) { const struct pci_slot_names { u32 slot_mask; - char names[0]; + char names[]; } *prop; const char *sp; int len, i; diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 07933d75ac81..1a1a9d6b8f2e 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -419,13 +419,13 @@ struct vio_remove_node_data { u64 node; }; -static int vio_md_node_match(struct device *dev, void *arg) +static int vio_md_node_match(struct device *dev, const void *arg) { struct vio_dev *vdev = to_vio_dev(dev); - struct vio_remove_node_data *node_data; + const struct vio_remove_node_data *node_data; u64 node; - node_data = (struct vio_remove_node_data *)arg; + node_data = (const struct vio_remove_node_data *)arg; node = vio_vdev_node(node_data->hp, vdev); diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile index 08de37559307..dcb06dc8b5ae 100644 --- a/arch/x86/coco/sev/Makefile +++ b/arch/x86/coco/sev/Makefile @@ -2,6 +2,10 @@ obj-y += core.o +# jump tables are emitted using absolute references in non-PIC code +# so they cannot be used in the early SEV startup code +CFLAGS_core.o += -fno-jump-tables + ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_core.o = -pg endif diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 45a395862fbc..f1cf7f2909f3 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1138,6 +1138,7 @@ static void blkcg_fill_root_iostats(void) blkg_iostat_set(&blkg->iostat.cur, &tmp); u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); } + class_dev_iter_exit(&iter); } static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s) diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index 79bbfe00d241..b8543a34caea 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -103,8 +103,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, acpi_status acpi_hw_enable_all_runtime_gpes(void); -acpi_status acpi_hw_enable_all_wakeup_gpes(void); - u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number); acpi_status diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 657c93c38b0d..6b9e65a42cd2 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -354,7 +354,7 @@ static struct device *next_device(struct klist_iter *i) * count in the supplied callback. */ int bus_for_each_dev(const struct bus_type *bus, struct device *start, - void *data, int (*fn)(struct device *, void *)) + void *data, device_iter_t fn) { struct subsys_private *sp = bus_to_subsys(bus); struct klist_iter i; @@ -402,9 +402,12 @@ struct device *bus_find_device(const struct bus_type *bus, klist_iter_init_node(&sp->klist_devices, &i, (start ? 
&start->p->knode_bus : NULL)); - while ((dev = next_device(&i))) - if (match(dev, data) && get_device(dev)) + while ((dev = next_device(&i))) { + if (match(dev, data)) { + get_device(dev); break; + } + } klist_iter_exit(&i); subsys_put(sp); return dev; diff --git a/drivers/base/class.c b/drivers/base/class.c index 582b5a02a5c4..2526c57d924e 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -323,8 +323,12 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class, struct subsys_private *sp = class_to_subsys(class); struct klist_node *start_knode = NULL; - if (!sp) + memset(iter, 0, sizeof(*iter)); + if (!sp) { + pr_crit("%s: class %p was not registered yet\n", + __func__, class); return; + } if (start) start_knode = &start->p->knode_class; @@ -351,6 +355,9 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter) struct klist_node *knode; struct device *dev; + if (!iter->sp) + return NULL; + while (1) { knode = klist_next(&iter->ki); if (!knode) @@ -395,7 +402,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_exit); * code. There's no locking restriction. */ int class_for_each_device(const struct class *class, const struct device *start, - void *data, int (*fn)(struct device *, void *)) + void *data, device_iter_t fn) { struct subsys_private *sp = class_to_subsys(class); struct class_dev_iter iter; @@ -594,30 +601,10 @@ EXPORT_SYMBOL_GPL(class_compat_unregister); * a bus device * @cls: the compatibility class * @dev: the target bus device - * @device_link: an optional device to which a "device" link should be created */ -int class_compat_create_link(struct class_compat *cls, struct device *dev, - struct device *device_link) +int class_compat_create_link(struct class_compat *cls, struct device *dev) { - int error; - - error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev)); - if (error) - return error; - - /* - * Optionally add a "device" link (typically to the parent), as a - * class device would have one and we want to provide as much - * backwards compatibility as possible. - */ - if (device_link) { - error = sysfs_create_link(&dev->kobj, &device_link->kobj, - "device"); - if (error) - sysfs_remove_link(cls->kobj, dev_name(dev)); - } - - return error; + return sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev)); } EXPORT_SYMBOL_GPL(class_compat_create_link); @@ -626,14 +613,9 @@ EXPORT_SYMBOL_GPL(class_compat_create_link); * a bus device * @cls: the compatibility class * @dev: the target bus device - * @device_link: an optional device to which a "device" link was previously - * created */ -void class_compat_remove_link(struct class_compat *cls, struct device *dev, - struct device *device_link) +void class_compat_remove_link(struct class_compat *cls, struct device *dev) { - if (device_link) - sysfs_remove_link(&dev->kobj, "device"); sysfs_remove_link(cls->kobj, dev_name(dev)); } EXPORT_SYMBOL_GPL(class_compat_remove_link); diff --git a/drivers/base/core.c b/drivers/base/core.c index 94865c9d8adc..5a1f05198114 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -3980,7 +3980,7 @@ const char *device_get_devnode(const struct device *dev, * other than 0, we break out and return that value. */ int device_for_each_child(struct device *parent, void *data, - int (*fn)(struct device *dev, void *data)) + device_iter_t fn) { struct klist_iter i; struct device *child; @@ -4010,7 +4010,7 @@ EXPORT_SYMBOL_GPL(device_for_each_child); * other than 0, we break out and return that value. 
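The bus, class, driver and device-child iterators converted above now share one callback type, device_iter_t, an int (*)(struct device *dev, void *data) matching the signatures being replaced. A usage sketch (function names are illustrative): returning 0 continues the walk, and any non-zero value stops it and is propagated to the caller.

	#include <linux/device.h>

	static int count_child(struct device *dev, void *data)
	{
		(*(unsigned int *)data)++;
		return 0;	/* keep iterating */
	}

	static unsigned int count_children(struct device *parent)
	{
		unsigned int n = 0;

		device_for_each_child(parent, &n, count_child);
		return n;
	}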
*/ int device_for_each_child_reverse(struct device *parent, void *data, - int (*fn)(struct device *dev, void *data)) + device_iter_t fn) { struct klist_iter i; struct device *child; @@ -4043,14 +4043,14 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse); * device_for_each_child_reverse_from(); */ int device_for_each_child_reverse_from(struct device *parent, - struct device *from, const void *data, - int (*fn)(struct device *, const void *)) + struct device *from, void *data, + device_iter_t fn) { struct klist_iter i; struct device *child; int error = 0; - if (!parent->p) + if (!parent || !parent->p) return 0; klist_iter_init_node(&parent->p->klist_children, &i, @@ -4079,8 +4079,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from); * * NOTE: you will need to drop the reference with put_device() after use. */ -struct device *device_find_child(struct device *parent, void *data, - int (*match)(struct device *dev, void *data)) +struct device *device_find_child(struct device *parent, const void *data, + device_match_t match) { struct klist_iter i; struct device *child; @@ -4089,62 +4089,17 @@ struct device *device_find_child(struct device *parent, void *data, return NULL; klist_iter_init(&parent->p->klist_children, &i); - while ((child = next_device(&i))) - if (match(child, data) && get_device(child)) + while ((child = next_device(&i))) { + if (match(child, data)) { + get_device(child); break; + } + } klist_iter_exit(&i); return child; } EXPORT_SYMBOL_GPL(device_find_child); -/** - * device_find_child_by_name - device iterator for locating a child device. - * @parent: parent struct device - * @name: name of the child device - * - * This is similar to the device_find_child() function above, but it - * returns a reference to a device that has the name @name. - * - * NOTE: you will need to drop the reference with put_device() after use. - */ -struct device *device_find_child_by_name(struct device *parent, - const char *name) -{ - struct klist_iter i; - struct device *child; - - if (!parent) - return NULL; - - klist_iter_init(&parent->p->klist_children, &i); - while ((child = next_device(&i))) - if (sysfs_streq(dev_name(child), name) && get_device(child)) - break; - klist_iter_exit(&i); - return child; -} -EXPORT_SYMBOL_GPL(device_find_child_by_name); - -static int match_any(struct device *dev, void *unused) -{ - return 1; -} - -/** - * device_find_any_child - device iterator for locating a child device, if any. - * @parent: parent struct device - * - * This is similar to the device_find_child() function above, but it - * returns a reference to a child device, if any. - * - * NOTE: you will need to drop the reference with put_device() after use. 
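With device_find_child() now taking a const data pointer and a device_match_t, the removed device_find_child_by_name() and device_find_any_child() helpers reduce to one-liners over the generic matchers. A sketch of the by-name lookup, assuming the exported device_match_name() matcher (the caller still owns the returned reference):

	#include <linux/device.h>

	/* Drop the returned reference with put_device() when done. */
	static struct device *find_child_named(struct device *parent,
					       const char *name)
	{
		return device_find_child(parent, name, device_match_name);
	}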
- */ -struct device *device_find_any_child(struct device *parent) -{ - return device_find_child(parent, NULL, match_any); -} -EXPORT_SYMBOL_GPL(device_find_any_child); - int __init devices_init(void) { devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); @@ -5244,15 +5199,21 @@ int device_match_name(struct device *dev, const void *name) } EXPORT_SYMBOL_GPL(device_match_name); +int device_match_type(struct device *dev, const void *type) +{ + return dev->type == type; +} +EXPORT_SYMBOL_GPL(device_match_type); + int device_match_of_node(struct device *dev, const void *np) { - return dev->of_node == np; + return np && dev->of_node == np; } EXPORT_SYMBOL_GPL(device_match_of_node); int device_match_fwnode(struct device *dev, const void *fwnode) { - return dev_fwnode(dev) == fwnode; + return fwnode && dev_fwnode(dev) == fwnode; } EXPORT_SYMBOL_GPL(device_match_fwnode); @@ -5264,13 +5225,13 @@ EXPORT_SYMBOL_GPL(device_match_devt); int device_match_acpi_dev(struct device *dev, const void *adev) { - return ACPI_COMPANION(dev) == adev; + return adev && ACPI_COMPANION(dev) == adev; } EXPORT_SYMBOL(device_match_acpi_dev); int device_match_acpi_handle(struct device *dev, const void *handle) { - return ACPI_HANDLE(dev) == handle; + return handle && ACPI_HANDLE(dev) == handle; } EXPORT_SYMBOL(device_match_acpi_handle); diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c index c795edad1b96..64840e5d5fcc 100644 --- a/drivers/base/devcoredump.c +++ b/drivers/base/devcoredump.c @@ -106,7 +106,7 @@ static void devcd_del(struct work_struct *wk) } static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -116,7 +116,7 @@ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, } static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -132,19 +132,15 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute devcd_attr_data = { - .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, }, - .size = 0, - .read = devcd_data_read, - .write = devcd_data_write, -}; +static const struct bin_attribute devcd_attr_data = + __BIN_ATTR(data, 0600, devcd_data_read, devcd_data_write, 0); -static struct bin_attribute *devcd_dev_bin_attrs[] = { +static const struct bin_attribute *const devcd_dev_bin_attrs[] = { &devcd_attr_data, NULL, }; static const struct attribute_group devcd_dev_group = { - .bin_attrs = devcd_dev_bin_attrs, + .bin_attrs_new = devcd_dev_bin_attrs, }; static const struct attribute_group *devcd_dev_groups[] = { @@ -186,9 +182,9 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri * mutex_lock(&devcd->mutex); * * - * In the above diagram, It looks like disabled_store() would be racing with parallely + * In the above diagram, it looks like disabled_store() would be racing with parallelly * running devcd_del() and result in memory abort while acquiring devcd->mutex which - * is called after kfree of devcd memory after dropping its last reference with + * is called after kfree of devcd memory after dropping its last reference with * put_device(). 
However, this will not happen as fn(dev, data) runs with its own reference to device via klist_node so it is not its last reference. So, the above situation would not occur. @@ -285,6 +281,8 @@ static void devcd_free_sgtable(void *data) * @offset: start copy from @offset@ bytes from the head of the data * in the given scatterlist * @data_len: the length of the data in the sg_table + * + * Returns: the number of bytes copied */ static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset, size_t buf_len, void *data, diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 2152eec0c135..93e7779ef21e 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -750,25 +750,38 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co EXPORT_SYMBOL_GPL(__devm_add_action); /** - * devm_remove_action() - removes previously added custom action + * devm_remove_action_nowarn() - removes previously added custom action * @dev: Device that owns the action * @action: Function implementing the action * @data: Pointer to data passed to @action implementation * * Removes instance of @action previously added by devm_add_action(). * Both action and data should match one of the existing entries. + * + * In contrast to devm_remove_action(), this function does not WARN() if no + * entry could have been found. + * + * This should only be used if the action is contained in an object with + * independent lifetime management, e.g. the Devres rust abstraction. + * + * Causing the warning from regular driver code most likely indicates an abuse + * of the devres API. + * + * Returns: 0 on success, -ENOENT if no entry could have been found. */ -void devm_remove_action(struct device *dev, void (*action)(void *), void *data) +int devm_remove_action_nowarn(struct device *dev, + void (*action)(void *), + void *data) { struct action_devres devres = { .data = data, .action = action, }; - WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match, - &devres)); + return devres_destroy(dev, devm_action_release, devm_action_match, + &devres); } -EXPORT_SYMBOL_GPL(devm_remove_action); +EXPORT_SYMBOL_GPL(devm_remove_action_nowarn); /** * devm_release_action() - release previously added custom action diff --git a/drivers/base/driver.c b/drivers/base/driver.c index b4eb5b89c4ee..8ab010ddf709 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_set_override); * Iterate over the @drv's list of devices calling @fn for each one. */ int driver_for_each_device(struct device_driver *drv, struct device *start, - void *data, int (*fn)(struct device *, void *)) + void *data, device_iter_t fn) { struct klist_iter i; struct device *dev; @@ -160,9 +160,12 @@ struct device *driver_find_device(const struct device_driver *drv, klist_iter_init_node(&drv->p->klist_devices, &i, (start ?
&start->p->knode_driver : NULL)); - while ((dev = next_device(&i))) - if (match(dev, data) && get_device(dev)) + while ((dev = next_device(&i))) { + if (match(dev, data)) { + get_device(dev); break; + } + } klist_iter_exit(&i); return dev; } diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c index c9c93b47d9a5..d254ceb56d84 100644 --- a/drivers/base/firmware_loader/sysfs.c +++ b/drivers/base/firmware_loader/sysfs.c @@ -259,7 +259,7 @@ static void firmware_rw(struct fw_priv *fw_priv, char *buffer, } static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -316,7 +316,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size) * the driver as a firmware image. **/ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -356,11 +356,11 @@ out: return retval; } -static struct bin_attribute firmware_attr_data = { +static const struct bin_attribute firmware_attr_data = { .attr = { .name = "data", .mode = 0644 }, .size = 0, - .read = firmware_data_read, - .write = firmware_data_write, + .read_new = firmware_data_read, + .write_new = firmware_data_write, }; static struct attribute *fw_dev_attrs[] = { @@ -374,14 +374,14 @@ static struct attribute *fw_dev_attrs[] = { NULL }; -static struct bin_attribute *fw_dev_bin_attrs[] = { +static const struct bin_attribute *const fw_dev_bin_attrs[] = { &firmware_attr_data, NULL }; static const struct attribute_group fw_dev_attr_group = { .attrs = fw_dev_attrs, - .bin_attrs = fw_dev_bin_attrs, + .bin_attrs_new = fw_dev_bin_attrs, #ifdef CONFIG_FW_UPLOAD .is_visible = fw_upload_is_visible, #endif diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig index 5c7fac80611c..2756870615cc 100644 --- a/drivers/base/test/Kconfig +++ b/drivers/base/test/Kconfig @@ -12,6 +12,7 @@ config TEST_ASYNC_DRIVER_PROBE config DM_KUNIT_TEST tristate "KUnit Tests for the device model" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS config DRIVER_PE_KUNIT_TEST tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS diff --git a/drivers/base/test/platform-device-test.c b/drivers/base/test/platform-device-test.c index ea05b8785743..6355a2231b74 100644 --- a/drivers/base/test/platform-device-test.c +++ b/drivers/base/test/platform-device-test.c @@ -1,8 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 +#include <kunit/platform_device.h> #include <kunit/resource.h> #include <linux/device.h> +#include <linux/device/bus.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #define DEVICE_NAME "test" @@ -217,7 +220,43 @@ static struct kunit_suite platform_device_devm_test_suite = { .test_cases = platform_device_devm_tests, }; -kunit_test_suite(platform_device_devm_test_suite); +static void platform_device_find_by_null_test(struct kunit *test) +{ + struct platform_device *pdev; + int ret; + + pdev = kunit_platform_device_alloc(test, DEVICE_NAME, PLATFORM_DEVID_NONE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev); + + ret = kunit_platform_device_add(test, pdev); + KUNIT_ASSERT_EQ(test, ret, 0); + + KUNIT_EXPECT_PTR_EQ(test, of_find_device_by_node(NULL), NULL); + + KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_of_node(&platform_bus_type, NULL), 
NULL); + KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_fwnode(&platform_bus_type, NULL), NULL); + KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_acpi_dev(&platform_bus_type, NULL), NULL); + + KUNIT_EXPECT_FALSE(test, device_match_of_node(&pdev->dev, NULL)); + KUNIT_EXPECT_FALSE(test, device_match_fwnode(&pdev->dev, NULL)); + KUNIT_EXPECT_FALSE(test, device_match_acpi_dev(&pdev->dev, NULL)); + KUNIT_EXPECT_FALSE(test, device_match_acpi_handle(&pdev->dev, NULL)); +} + +static struct kunit_case platform_device_match_tests[] = { + KUNIT_CASE(platform_device_find_by_null_test), + {} +}; + +static struct kunit_suite platform_device_match_test_suite = { + .name = "platform-device-match", + .test_cases = platform_device_match_tests, +}; + +kunit_test_suites( + &platform_device_devm_test_suite, + &platform_device_match_test_suite, +); MODULE_DESCRIPTION("Test module for platform devices"); MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>"); diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 88dcae6ec575..e4d1e7284dae 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -918,12 +918,12 @@ struct vdc_check_port_data { char *type; }; -static int vdc_device_probed(struct device *dev, void *arg) +static int vdc_device_probed(struct device *dev, const void *arg) { struct vio_dev *vdev = to_vio_dev(dev); - struct vdc_check_port_data *port_data; + const struct vdc_check_port_data *port_data; - port_data = (struct vdc_check_port_data *)arg; + port_data = (const struct vdc_check_port_data *)arg; if ((vdev->dev_no == port_data->dev_no) && (!(strcmp((char *)&vdev->type, port_data->type))) && diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c index 4b68c84ef485..52053f7c6d9a 100644 --- a/drivers/bus/fsl-mc/dprc-driver.c +++ b/drivers/bus/fsl-mc/dprc-driver.c @@ -22,8 +22,8 @@ struct fsl_mc_child_objs { struct fsl_mc_obj_desc *child_array; }; -static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev, - struct fsl_mc_obj_desc *obj_desc) +static bool fsl_mc_device_match(const struct fsl_mc_device *mc_dev, + const struct fsl_mc_obj_desc *obj_desc) { return mc_dev->obj_desc.id == obj_desc->id && strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0; @@ -112,9 +112,9 @@ void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev, } EXPORT_SYMBOL_GPL(dprc_remove_devices); -static int __fsl_mc_device_match(struct device *dev, void *data) +static int __fsl_mc_device_match(struct device *dev, const void *data) { - struct fsl_mc_obj_desc *obj_desc = data; + const struct fsl_mc_obj_desc *obj_desc = data; struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); return fsl_mc_device_match(mc_dev, obj_desc); diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 2916d1333649..d1f3d327ddd1 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -320,90 +320,90 @@ const struct bus_type fsl_mc_bus_type = { }; EXPORT_SYMBOL_GPL(fsl_mc_bus_type); -struct device_type fsl_mc_bus_dprc_type = { +const struct device_type fsl_mc_bus_dprc_type = { .name = "fsl_mc_bus_dprc" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type); -struct device_type fsl_mc_bus_dpni_type = { +const struct device_type fsl_mc_bus_dpni_type = { .name = "fsl_mc_bus_dpni" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type); -struct device_type fsl_mc_bus_dpio_type = { +const struct device_type fsl_mc_bus_dpio_type = { .name = "fsl_mc_bus_dpio" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type); -struct device_type fsl_mc_bus_dpsw_type = { +const struct device_type 
fsl_mc_bus_dpsw_type = { .name = "fsl_mc_bus_dpsw" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type); -struct device_type fsl_mc_bus_dpbp_type = { +const struct device_type fsl_mc_bus_dpbp_type = { .name = "fsl_mc_bus_dpbp" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type); -struct device_type fsl_mc_bus_dpcon_type = { +const struct device_type fsl_mc_bus_dpcon_type = { .name = "fsl_mc_bus_dpcon" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type); -struct device_type fsl_mc_bus_dpmcp_type = { +const struct device_type fsl_mc_bus_dpmcp_type = { .name = "fsl_mc_bus_dpmcp" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type); -struct device_type fsl_mc_bus_dpmac_type = { +const struct device_type fsl_mc_bus_dpmac_type = { .name = "fsl_mc_bus_dpmac" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type); -struct device_type fsl_mc_bus_dprtc_type = { +const struct device_type fsl_mc_bus_dprtc_type = { .name = "fsl_mc_bus_dprtc" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type); -struct device_type fsl_mc_bus_dpseci_type = { +const struct device_type fsl_mc_bus_dpseci_type = { .name = "fsl_mc_bus_dpseci" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type); -struct device_type fsl_mc_bus_dpdmux_type = { +const struct device_type fsl_mc_bus_dpdmux_type = { .name = "fsl_mc_bus_dpdmux" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type); -struct device_type fsl_mc_bus_dpdcei_type = { +const struct device_type fsl_mc_bus_dpdcei_type = { .name = "fsl_mc_bus_dpdcei" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type); -struct device_type fsl_mc_bus_dpaiop_type = { +const struct device_type fsl_mc_bus_dpaiop_type = { .name = "fsl_mc_bus_dpaiop" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type); -struct device_type fsl_mc_bus_dpci_type = { +const struct device_type fsl_mc_bus_dpci_type = { .name = "fsl_mc_bus_dpci" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type); -struct device_type fsl_mc_bus_dpdmai_type = { +const struct device_type fsl_mc_bus_dpdmai_type = { .name = "fsl_mc_bus_dpdmai" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type); -struct device_type fsl_mc_bus_dpdbg_type = { +const struct device_type fsl_mc_bus_dpdbg_type = { .name = "fsl_mc_bus_dpdbg" }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type); -static struct device_type *fsl_mc_get_device_type(const char *type) +static const struct device_type *fsl_mc_get_device_type(const char *type) { static const struct { - struct device_type *dev_type; + const struct device_type *dev_type; const char *type; } dev_types[] = { { &fsl_mc_bus_dprc_type, "dprc" }, diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c index c8241f5a0a26..f20ae7e35a0d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c @@ -473,22 +473,6 @@ unlock_and_exit: } DEFINE_SHOW_STORE_ATTRIBUTE(tl_control); -static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num) -{ - char alpha; - u8 index; - int ret; - - ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha); - if (ret != 1) - return -EINVAL; - - index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha); - *rp_id = index; - - return 0; -} - static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev, unsigned int new_rp_num, unsigned int rp_regs_index) @@ -611,18 +595,11 @@ static int tl_rp_data_show(struct seq_file *s, void *unused) { struct adf_accel_dev *accel_dev = s->private; u8 rp_regs_index; - u8 max_rp; - int ret; if (!accel_dev) return -EINVAL; - max_rp = GET_TL_DATA(accel_dev).max_rp; - ret = get_rp_index_from_file(s->file, 
&rp_regs_index, max_rp); - if (ret) { - dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n"); - return ret; - } + rp_regs_index = debugfs_get_aux_num(s->file); return tl_print_rp_data(accel_dev, s, rp_regs_index); } @@ -635,7 +612,6 @@ static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf, struct adf_telemetry *telemetry; unsigned int new_rp_num; u8 rp_regs_index; - u8 max_rp; int ret; accel_dev = seq_f->private; @@ -643,15 +619,10 @@ static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf, return -EINVAL; telemetry = accel_dev->telemetry; - max_rp = GET_TL_DATA(accel_dev).max_rp; mutex_lock(&telemetry->wr_lock); - ret = get_rp_index_from_file(file, &rp_regs_index, max_rp); - if (ret) { - dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n"); - goto unlock_and_exit; - } + rp_regs_index = debugfs_get_aux_num(file); ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num); if (ret) @@ -689,7 +660,8 @@ void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev) for (i = 0; i < max_rp; i++) { snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME, ADF_TL_DBG_RP_ALPHA_INDEX(i)); - debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops); + debugfs_create_file_aux_num(name, 0644, dir, accel_dev, i, + &tl_rp_data_fops); } } diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 28edd5822486..50e6a45b30ba 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -703,7 +703,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld) return 0; } -static int commit_reap(struct device *dev, const void *data) +static int commit_reap(struct device *dev, void *data) { struct cxl_port *port = to_cxl_port(dev->parent); struct cxl_decoder *cxld; diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 9d58ab9d33c5..a3c57f96138a 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -252,9 +252,9 @@ static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds) } /* require dvsec ranges to be covered by a locked platform window */ -static int dvsec_range_allowed(struct device *dev, void *arg) +static int dvsec_range_allowed(struct device *dev, const void *arg) { - struct range *dev_range = arg; + const struct range *dev_range = arg; struct cxl_decoder *cxld; if (!is_root_decoder(dev)) diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index b3378d3f6acb..8853415c106a 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -51,17 +51,6 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev) } EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, "CXL"); -bool is_cxl_nvdimm_bridge(struct device *dev) -{ - return dev->type == &cxl_nvdimm_bridge_type; -} -EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, "CXL"); - -static int match_nvdimm_bridge(struct device *dev, void *data) -{ - return is_cxl_nvdimm_bridge(dev); -} - /** * cxl_find_nvdimm_bridge() - find a bridge device relative to a port * @port: any descendant port of an nvdimm-bridge associated @@ -75,7 +64,9 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port) if (!cxl_root) return NULL; - dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge); + dev = device_find_child(&cxl_root->port.dev, + &cxl_nvdimm_bridge_type, + device_match_type); if (!dev) return NULL; diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index b98b1ccffd1c..e8d11a988fd9 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -778,7 +778,7 @@ out: return rc; } 
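
The ADF telemetry conversion above is the pattern this series applies throughout: instead of re-deriving an index by parsing the debugfs file name (the removed get_rp_index_from_file()), the creator stores the index at file-creation time with debugfs_create_file_aux_num() and the callbacks read it back with debugfs_get_aux_num(). A minimal sketch of that pairing, under hypothetical names (my_show, my_debugfs_init); only the two debugfs helpers come from this series:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static int my_show(struct seq_file *s, void *unused)
    {
            /* Recover the number stashed when the file was created. */
            seq_printf(s, "instance %lu\n",
                       (unsigned long)debugfs_get_aux_num(s->file));
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(my_show);

    static void my_debugfs_init(struct dentry *dir, void *priv)
    {
            char name[16];
            int i;

            for (i = 0; i < 4; i++) {
                    snprintf(name, sizeof(name), "inst%d", i);
                    /* The index rides along with the dentry: no name parsing. */
                    debugfs_create_file_aux_num(name, 0400, dir, priv, i,
                                                &my_show_fops);
            }
    }
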
-static int check_commit_order(struct device *dev, const void *data) +static int check_commit_order(struct device *dev, void *data) { struct cxl_decoder *cxld = to_cxl_decoder(dev); @@ -792,7 +792,7 @@ static int check_commit_order(struct device *dev, const void *data) return 0; } -static int match_free_decoder(struct device *dev, void *data) +static int match_free_decoder(struct device *dev, const void *data) { struct cxl_port *port = to_cxl_port(dev->parent); struct cxl_decoder *cxld; @@ -824,9 +824,9 @@ static int match_free_decoder(struct device *dev, void *data) return 1; } -static int match_auto_decoder(struct device *dev, void *data) +static int match_auto_decoder(struct device *dev, const void *data) { - struct cxl_region_params *p = data; + const struct cxl_region_params *p = data; struct cxl_decoder *cxld; struct range *r; @@ -1733,10 +1733,12 @@ static struct cxl_port *next_port(struct cxl_port *port) return port->parent_dport->port; } -static int match_switch_decoder_by_range(struct device *dev, void *data) +static int match_switch_decoder_by_range(struct device *dev, + const void *data) { struct cxl_switch_decoder *cxlsd; - struct range *r1, *r2 = data; + const struct range *r1, *r2 = data; + if (!is_switch_decoder(dev)) return 0; @@ -3187,9 +3189,10 @@ err: return rc; } -static int match_root_decoder_by_range(struct device *dev, void *data) +static int match_root_decoder_by_range(struct device *dev, + const void *data) { - struct range *r1, *r2 = data; + const struct range *r1, *r2 = data; struct cxl_root_decoder *cxlrd; if (!is_root_decoder(dev)) @@ -3200,11 +3203,11 @@ static int match_root_decoder_by_range(struct device *dev, void *data) return range_contains(r1, r2); } -static int match_region_by_range(struct device *dev, void *data) +static int match_region_by_range(struct device *dev, const void *data) { struct cxl_region_params *p; struct cxl_region *cxlr; - struct range *r = data; + const struct range *r = data; int rc = 0; if (!is_cxl_region(dev)) diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index f6015f24ad38..064a15cf559f 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -864,7 +864,6 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, struct cxl_port *port); struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm(struct device *dev); -bool is_cxl_nvdimm_bridge(struct device *dev); int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd); struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port); diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index a99fe35f1f0d..ec3e21ad2025 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -988,7 +988,7 @@ int fw_device_set_broadcast_channel(struct device *dev, void *gen) return 0; } -static int compare_configuration_rom(struct device *dev, void *data) +static int compare_configuration_rom(struct device *dev, const void *data) { const struct fw_device *old = fw_device(dev); const u32 *config_rom = data; @@ -1039,7 +1039,7 @@ static void fw_device_init(struct work_struct *work) // // serialize config_rom access. 
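
The CXL and firewire changes above are two sides of the same interface fix: device_match_t gains a const-qualified data argument, and device_find_child()/bus_find_device() take a const cookie, so callers can drop "(void *)" casts and, where a stock matcher exists (device_match_of_node(), device_match_fwnode(), device_match_type(), or device_find_child_by_name() for name lookups), delete their one-off callbacks entirely. A hedged sketch of the resulting caller pattern; find_child_of_node() is an illustrative name, not part of the series:

    #include <linux/device.h>
    #include <linux/of.h>

    static struct device *find_child_of_node(struct device *parent,
                                             const struct device_node *np)
    {
            /*
             * On a match the child is returned with a reference held
             * (get_device()); the caller must drop it with put_device().
             */
            return device_find_child(parent, np, device_match_of_node);
    }
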
scoped_guard(rwsem_read, &fw_device_rwsem) { - found = device_find_child(card->device, (void *)device->config_rom, + found = device_find_child(card->device, device->config_rom, compare_configuration_rom); } if (found) { diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 157172a5f2b5..a3386bf36de5 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -238,10 +238,10 @@ static int scmi_dev_match(struct device *dev, const struct device_driver *drv) return 0; } -static int scmi_match_by_id_table(struct device *dev, void *data) +static int scmi_match_by_id_table(struct device *dev, const void *data) { struct scmi_device *sdev = to_scmi_dev(dev); - struct scmi_device_id *id_table = data; + const struct scmi_device_id *id_table = data; return sdev->protocol_id == id_table->protocol_id && (id_table->name && !strcmp(sdev->name, id_table->name)); diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c index 9e89a6a763da..7cc0d616b8de 100644 --- a/drivers/firmware/arm_scmi/raw_mode.c +++ b/drivers/firmware/arm_scmi/raw_mode.c @@ -886,10 +886,8 @@ static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp, static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp) { - u8 id; struct scmi_raw_mode_info *raw; struct scmi_dbg_raw_data *rd; - const char *id_str = filp->f_path.dentry->d_parent->d_name.name; if (!inode->i_private) return -ENODEV; @@ -915,8 +913,8 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp) } /* Grab channel ID from debugfs entry naming if any */ - if (!kstrtou8(id_str, 16, &id)) - rd->chan_id = id; + /* not set - reassigning 0 we already had after kzalloc() */ + rd->chan_id = debugfs_get_aux_num(filp); rd->raw = raw; filp->private_data = rd; @@ -1225,10 +1223,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle, snprintf(cdir, 8, "0x%02X", channels[i]); chd = debugfs_create_dir(cdir, top_chans); - debugfs_create_file("message", 0600, chd, raw, + debugfs_create_file_aux_num("message", 0600, chd, + raw, channels[i], &scmi_dbg_raw_mode_message_fops); - debugfs_create_file("message_async", 0600, chd, raw, + debugfs_create_file_aux_num("message_async", 0600, chd, + raw, channels[i], &scmi_dbg_raw_mode_message_async_fops); } } diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c index 937be269fee8..13ea141c0def 100644 --- a/drivers/firmware/efi/dev-path-parser.c +++ b/drivers/firmware/efi/dev-path-parser.c @@ -47,9 +47,9 @@ static long __init parse_acpi_path(const struct efi_dev_path *node, return 0; } -static int __init match_pci_dev(struct device *dev, void *data) +static int __init match_pci_dev(struct device *dev, const void *data) { - unsigned int devfn = *(unsigned int *)data; + unsigned int devfn = *(const unsigned int *)data; return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn; } diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index 686ae3d11ba3..a086087ada17 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -413,11 +413,6 @@ static int gpio_sim_setup_sysfs(struct gpio_sim_chip *chip) return devm_add_action_or_reset(dev, gpio_sim_sysfs_remove, chip); } -static int gpio_sim_dev_match_fwnode(struct device *dev, void *data) -{ - return device_match_fwnode(dev, data); -} - static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev) { struct gpio_sim_chip *chip; @@ -503,7 +498,7 @@ static int gpio_sim_add_bank(struct fwnode_handle
*swnode, struct device *dev) if (ret) return ret; - chip->dev = device_find_child(dev, swnode, gpio_sim_dev_match_fwnode); + chip->dev = device_find_child(dev, swnode, device_match_fwnode); if (!chip->dev) return -ENODEV; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index cd25e5afe55a..f22ad2882697 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -358,7 +358,7 @@ static const struct of_device_id mtk_drm_of_ids[] = { }; MODULE_DEVICE_TABLE(of, mtk_drm_of_ids); -static int mtk_drm_match(struct device *dev, void *data) +static int mtk_drm_match(struct device *dev, const void *data) { if (!strncmp(dev_name(dev), "mediatek-drm", sizeof("mediatek-drm") - 1)) return true; diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index b7c0b1e3c23b..9703d60e9bbf 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -332,7 +332,7 @@ static int hwmon_attr_base(enum hwmon_sensor_types type) static DEFINE_MUTEX(hwmon_pec_mutex); -static int hwmon_match_device(struct device *dev, void *data) +static int hwmon_match_device(struct device *dev, const void *data) { return dev->class == &hwmon_class; } diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index dd8c74f893db..2c1a60577728 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -6,6 +6,7 @@ #include <linux/acpi.h> #include <linux/bitops.h> #include <linux/kernel.h> +#include <linux/kvm_host.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> @@ -268,10 +269,28 @@ struct etm4_enable_arg { */ static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata) { + u64 trfcr; + /* If the CPU doesn't support FEAT_TRF, nothing to do */ if (!drvdata->trfcr) return; - cpu_prohibit_trace(); + + trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE); + + write_trfcr(trfcr); + kvm_tracing_set_el1_configuration(trfcr); +} + +static u64 etm4x_get_kern_user_filter(struct etmv4_drvdata *drvdata) +{ + u64 trfcr = drvdata->trfcr; + + if (drvdata->config.mode & ETM_MODE_EXCL_KERN) + trfcr &= ~TRFCR_EL1_ExTRE; + if (drvdata->config.mode & ETM_MODE_EXCL_USER) + trfcr &= ~TRFCR_EL1_E0TRE; + + return trfcr; } /* @@ -286,18 +305,28 @@ static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata) */ static void etm4x_allow_trace(struct etmv4_drvdata *drvdata) { - u64 trfcr = drvdata->trfcr; + u64 trfcr, guest_trfcr; /* If the CPU doesn't support FEAT_TRF, nothing to do */ - if (!trfcr) + if (!drvdata->trfcr) return; - if (drvdata->config.mode & ETM_MODE_EXCL_KERN) - trfcr &= ~TRFCR_ELx_ExTRE; - if (drvdata->config.mode & ETM_MODE_EXCL_USER) - trfcr &= ~TRFCR_ELx_E0TRE; + if (drvdata->config.mode & ETM_MODE_EXCL_HOST) + trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE); + else + trfcr = etm4x_get_kern_user_filter(drvdata); write_trfcr(trfcr); + + /* Set filters for guests and pass to KVM */ + if (drvdata->config.mode & ETM_MODE_EXCL_GUEST) + guest_trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE); + else + guest_trfcr = etm4x_get_kern_user_filter(drvdata); + + /* TRFCR_EL1 doesn't have CX so mask it out. 
*/ + guest_trfcr &= ~TRFCR_EL2_CX; + kvm_tracing_set_el1_configuration(guest_trfcr); } #ifdef CONFIG_ETM4X_IMPDEF_FEATURE @@ -655,6 +684,12 @@ static int etm4_parse_event_config(struct coresight_device *csdev, if (attr->exclude_user) config->mode = ETM_MODE_EXCL_USER; + if (attr->exclude_host) + config->mode |= ETM_MODE_EXCL_HOST; + + if (attr->exclude_guest) + config->mode |= ETM_MODE_EXCL_GUEST; + /* Always start from the default config */ etm4_set_default_config(config); @@ -1141,9 +1176,9 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata) * tracing at the kernel EL and EL0, forcing to use the * virtual time as the timestamp. */ - trfcr = (TRFCR_ELx_TS_VIRTUAL | - TRFCR_ELx_ExTRE | - TRFCR_ELx_E0TRE); + trfcr = (TRFCR_EL1_TS_VIRTUAL | + TRFCR_EL1_ExTRE | + TRFCR_EL1_E0TRE); /* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */ if (is_kernel_in_hyp_mode()) diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index a9f19629f3f8..c767f8ae4cf1 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -2319,11 +2319,11 @@ static ssize_t ts_source_show(struct device *dev, goto out; } - switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) { - case TRFCR_ELx_TS_VIRTUAL: - case TRFCR_ELx_TS_GUEST_PHYSICAL: - case TRFCR_ELx_TS_PHYSICAL: - val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr); + switch (drvdata->trfcr & TRFCR_EL1_TS_MASK) { + case TRFCR_EL1_TS_VIRTUAL: + case TRFCR_EL1_TS_GUEST_PHYSICAL: + case TRFCR_EL1_TS_PHYSICAL: + val = FIELD_GET(TRFCR_EL1_TS_MASK, drvdata->trfcr); break; default: val = -1; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 9e9165f62e81..1119762b5cec 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -817,7 +817,7 @@ enum etm_impdef_type { * @s_ex_level: Secure ELs where tracing is supported. 
*/ struct etmv4_config { - u32 mode; + u64 mode; u32 pe_sel; u32 cfg; u32 eventctrl0; diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 05f891ca6b5c..76403530f33e 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -42,6 +42,9 @@ extern const struct device_type coresight_dev_type[]; #define ETM_MODE_EXCL_KERN BIT(30) #define ETM_MODE_EXCL_USER BIT(31) +#define ETM_MODE_EXCL_HOST BIT(32) +#define ETM_MODE_EXCL_GUEST BIT(33) + struct cs_pair_attribute { struct device_attribute attr; u32 lo_off; diff --git a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h index 53840a2c41f2..303d71911870 100644 --- a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h +++ b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h @@ -21,13 +21,4 @@ static inline void write_trfcr(u64 val) isb(); } -static inline u64 cpu_prohibit_trace(void) -{ - u64 trfcr = read_trfcr(); - - /* Prohibit tracing at EL0 & the kernel EL */ - write_trfcr(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE)); - /* Return the original value of the TRFCR */ - return trfcr; -} #endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */ diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index 919804b12a67..fff67aac8418 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -17,6 +17,7 @@ #include <asm/barrier.h> #include <asm/cpufeature.h> +#include <linux/kvm_host.h> #include <linux/vmalloc.h> #include "coresight-self-hosted-trace.h" @@ -221,6 +222,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr) */ trblimitr |= TRBLIMITR_EL1_E; write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); + kvm_enable_trbe(); /* Synchronize the TRBE enable event */ isb(); @@ -239,6 +241,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata) */ trblimitr &= ~TRBLIMITR_EL1_E; write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); + kvm_disable_trbe(); if (trbe_needs_drain_after_disable(cpudata)) trbe_drain_buffer(); @@ -253,8 +256,8 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata) static void trbe_reset_local(struct trbe_cpudata *cpudata) { - trbe_drain_and_disable_local(cpudata); write_sysreg_s(0, SYS_TRBLIMITR_EL1); + trbe_drain_buffer(); write_sysreg_s(0, SYS_TRBPTR_EL1); write_sysreg_s(0, SYS_TRBBASER_EL1); write_sysreg_s(0, SYS_TRBSR_EL1); @@ -1110,6 +1113,16 @@ static bool is_perf_trbe(struct perf_output_handle *handle) return true; } +static u64 cpu_prohibit_trace(void) +{ + u64 trfcr = read_trfcr(); + + /* Prohibit tracing at EL0 & the kernel EL */ + write_trfcr(trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE)); + /* Return the original value of the TRFCR */ + return trfcr; +} + static irqreturn_t arm_trbe_irq_handler(int irq, void *dev) { struct perf_output_handle **handle_ptr = dev; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index c24ccefb015e..e2c2a2ef1c12 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -1310,7 +1310,7 @@ new_device_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_WO(new_device); -static int __i2c_find_user_addr(struct device *dev, void *addrp) +static int __i2c_find_user_addr(struct device *dev, const void *addrp) { struct i2c_client *client = i2c_verify_client(dev); unsigned short addr = *(unsigned short 
*)addrp; diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c index 7d3b24c8ecae..4fe1a9c0bc1b 100644 --- a/drivers/leds/leds-turris-omnia.c +++ b/drivers/leds/leds-turris-omnia.c @@ -438,7 +438,7 @@ static int omnia_mcu_get_features(const struct i2c_client *mcu_client) return reply; } -static int omnia_match_mcu_client(struct device *dev, void *data) +static int omnia_match_mcu_client(struct device *dev, const void *data) { struct i2c_client *client; diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c index 8ceaed5c1453..f90ffc4dad52 100644 --- a/drivers/media/pci/mgb4/mgb4_core.c +++ b/drivers/media/pci/mgb4/mgb4_core.c @@ -125,7 +125,7 @@ static const struct hwmon_chip_info temp_chip_info = { }; #endif -static int match_i2c_adap(struct device *dev, void *data) +static int match_i2c_adap(struct device *dev, const void *data) { return i2c_verify_adapter(dev) ? 1 : 0; } @@ -141,7 +141,7 @@ static struct i2c_adapter *get_i2c_adap(struct platform_device *pdev) return dev ? to_i2c_adapter(dev) : NULL; } -static int match_spi_adap(struct device *dev, void *data) +static int match_spi_adap(struct device *dev, const void *data) { return to_spi_device(dev) ? 1 : 0; } diff --git a/drivers/mux/core.c b/drivers/mux/core.c index 78c0022697ec..02be4ba37257 100644 --- a/drivers/mux/core.c +++ b/drivers/mux/core.c @@ -42,7 +42,7 @@ struct mux_state { unsigned int state; }; -static struct class mux_class = { +static const struct class mux_class = { .name = "mux", }; diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c index b19492a7f6ad..8adbec7c5084 100644 --- a/drivers/net/bonding/bond_debugfs.c +++ b/drivers/net/bonding/bond_debugfs.c @@ -63,13 +63,8 @@ void bond_debug_unregister(struct bonding *bond) void bond_debug_reregister(struct bonding *bond) { - struct dentry *d; - - d = debugfs_rename(bonding_debug_root, bond->debug_dir, - bonding_debug_root, bond->dev->name); - if (!IS_ERR(d)) { - bond->debug_dir = d; - } else { + int err = debugfs_change_name(bond->debug_dir, "%s", bond->dev->name); + if (err) { netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n"); bond_debug_unregister(bond); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index b0a6c96b6ef4..b35808d3d07f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -505,21 +505,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) { - char *buf; - - if (!pdata->xgbe_debugfs) - return; - - buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); - if (!buf) - return; - - if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf)) - goto out; - - debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs, - pdata->xgbe_debugfs->d_parent, buf); - -out: - kfree(buf); + debugfs_change_name(pdata->xgbe_debugfs, + "amd-xgbe-%s", pdata->netdev->name); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 148144f5b61d..a1f9ec03c2ce 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -917,19 +917,18 @@ static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf) /* The 'qsize' entry dumps current Aura/Pool context Qsize * and each context's current enable/disable status in a bitmap. 
*/ -static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, +static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused, int blktype) { - void (*print_qsize)(struct seq_file *filp, + void (*print_qsize)(struct seq_file *s, struct rvu_pfvf *pfvf) = NULL; - struct dentry *current_dir; struct rvu_pfvf *pfvf; struct rvu *rvu; int qsize_id; u16 pcifunc; int blkaddr; - rvu = filp->private; + rvu = s->private; switch (blktype) { case BLKTYPE_NPA: qsize_id = rvu->rvu_dbg.npa_qsize_id; @@ -945,32 +944,28 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, return -EINVAL; } - if (blktype == BLKTYPE_NPA) { + if (blktype == BLKTYPE_NPA) blkaddr = BLKADDR_NPA; - } else { - current_dir = filp->file->f_path.dentry->d_parent; - blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? - BLKADDR_NIX1 : BLKADDR_NIX0); - } + else + blkaddr = debugfs_get_aux_num(s->file); if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); - print_qsize(filp, pfvf); + print_qsize(s, pfvf); return 0; } -static ssize_t rvu_dbg_qsize_write(struct file *filp, +static ssize_t rvu_dbg_qsize_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int blktype) { char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix"; - struct seq_file *seqfile = filp->private_data; + struct seq_file *seqfile = file->private_data; char *cmd_buf, *cmd_buf_tmp, *subtoken; struct rvu *rvu = seqfile->private; - struct dentry *current_dir; int blkaddr; u16 pcifunc; int ret, lf; @@ -996,13 +991,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp, goto qsize_write_done; } - if (blktype == BLKTYPE_NPA) { + if (blktype == BLKTYPE_NPA) blkaddr = BLKADDR_NPA; - } else { - current_dir = filp->f_path.dentry->d_parent; - blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? 
- BLKADDR_NIX1 : BLKADDR_NIX0); - } + else + blkaddr = debugfs_get_aux_num(file); if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) { ret = -EINVAL; @@ -2704,8 +2696,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) &rvu_dbg_nix_ndc_tx_hits_miss_fops); debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_ndc_rx_hits_miss_fops); - debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, - &rvu_dbg_nix_qsize_fops); + debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu, + blkaddr, &rvu_dbg_nix_qsize_fops); debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_band_prof_ctx_fops); debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw, @@ -2854,28 +2846,14 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; } -static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) +static int rvu_dbg_derive_lmacid(struct seq_file *s) { - struct dentry *current_dir; - char *buf; - - current_dir = filp->file->f_path.dentry->d_parent; - buf = strrchr(current_dir->d_name.name, 'c'); - if (!buf) - return -EINVAL; - - return kstrtoint(buf + 1, 10, lmac_id); + return debugfs_get_aux_num(s->file); } -static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused) { - int lmac_id, err; - - err = rvu_dbg_derive_lmacid(filp, &lmac_id); - if (!err) - return cgx_print_stats(filp, lmac_id); - - return err; + return cgx_print_stats(s, rvu_dbg_derive_lmacid(s)); } RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); @@ -2933,15 +2911,9 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) return 0; } -static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) +static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused) { - int err, lmac_id; - - err = rvu_dbg_derive_lmacid(filp, &lmac_id); - if (!err) - return cgx_print_dmac_flt(filp, lmac_id); - - return err; + return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s)); } RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); @@ -2980,10 +2952,10 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) rvu->rvu_dbg.lmac = debugfs_create_dir(dname, rvu->rvu_dbg.cgx); - debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, - cgx, &rvu_dbg_cgx_stat_fops); - debugfs_create_file("mac_filter", 0600, - rvu->rvu_dbg.lmac, cgx, + debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac, + cgx, lmac_id, &rvu_dbg_cgx_stat_fops); + debugfs_create_file_aux_num("mac_filter", 0600, + rvu->rvu_dbg.lmac, cgx, lmac_id, &rvu_dbg_cgx_dmac_flt_fops); } } diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 25bf6ec44289..a1bada9eaaf6 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3742,10 +3742,7 @@ static int skge_device_event(struct notifier_block *unused, skge = netdev_priv(dev); switch (event) { case NETDEV_CHANGENAME: - if (skge->debugfs) - skge->debugfs = debugfs_rename(skge_debug, - skge->debugfs, - skge_debug, dev->name); + debugfs_change_name(skge->debugfs, "%s", dev->name); break; case NETDEV_GOING_DOWN: diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 988fa28cfb5f..d7121c836508 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4494,10 +4494,7 @@ static int sky2_device_event(struct notifier_block *unused, switch (event) { case 
NETDEV_CHANGENAME: - if (sky2->debugfs) { - sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, - sky2_debug, dev->name); - } + debugfs_change_name(sky2->debugfs, "%s", dev->name); break; case NETDEV_GOING_DOWN: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index edbf8994455d..5212dc439b1d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -6535,11 +6535,7 @@ static int stmmac_device_event(struct notifier_block *unused, switch (event) { case NETDEV_CHANGENAME: - if (priv->dbgfs_dir) - priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, - priv->dbgfs_dir, - stmmac_fs_dir, - dev->name); + debugfs_change_name(priv->dbgfs_dir, "%s", dev->name); break; } done: diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c index 0e58aa7f0374..66b3215db3ac 100644 --- a/drivers/net/netdevsim/hwstats.c +++ b/drivers/net/netdevsim/hwstats.c @@ -331,7 +331,6 @@ enum nsim_dev_hwstats_do { }; struct nsim_dev_hwstats_fops { - const struct file_operations fops; enum nsim_dev_hwstats_do action; enum netdev_offload_xstats_type type; }; @@ -342,13 +341,12 @@ nsim_dev_hwstats_do_write(struct file *file, size_t count, loff_t *ppos) { struct nsim_dev_hwstats *hwstats = file->private_data; - struct nsim_dev_hwstats_fops *hwsfops; + const struct nsim_dev_hwstats_fops *hwsfops; struct list_head *hwsdev_list; int ifindex; int err; - hwsfops = container_of(debugfs_real_fops(file), - struct nsim_dev_hwstats_fops, fops); + hwsfops = debugfs_get_aux(file); err = kstrtoint_from_user(data, count, 0, &ifindex); if (err) @@ -381,14 +379,13 @@ nsim_dev_hwstats_do_write(struct file *file, return count; } +static struct debugfs_short_fops debugfs_ops = { + .write = nsim_dev_hwstats_do_write, + .llseek = generic_file_llseek, +}; + #define NSIM_DEV_HWSTATS_FOPS(ACTION, TYPE) \ { \ - .fops = { \ - .open = simple_open, \ - .write = nsim_dev_hwstats_do_write, \ - .llseek = generic_file_llseek, \ - .owner = THIS_MODULE, \ - }, \ .action = ACTION, \ .type = TYPE, \ } @@ -433,12 +430,12 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev) goto err_remove_hwstats_recursive; } - debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats, - &nsim_dev_hwstats_l3_enable_fops.fops); - debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats, - &nsim_dev_hwstats_l3_disable_fops.fops); - debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats, - &nsim_dev_hwstats_l3_fail_fops.fops); + debugfs_create_file_aux("enable_ifindex", 0200, hwstats->l3_ddir, hwstats, + &nsim_dev_hwstats_l3_enable_fops, &debugfs_ops); + debugfs_create_file_aux("disable_ifindex", 0200, hwstats->l3_ddir, hwstats, + &nsim_dev_hwstats_l3_disable_fops, &debugfs_ops); + debugfs_create_file_aux("fail_next_enable", 0200, hwstats->l3_ddir, hwstats, + &nsim_dev_hwstats_l3_fail_fops, &debugfs_ops); INIT_DELAYED_WORK(&hwstats->traffic_dw, &nsim_dev_hwstats_traffic_work); diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c index bb40889d7c72..2d734567000a 100644 --- a/drivers/net/wireless/ath/carl9170/debug.c +++ b/drivers/net/wireless/ath/carl9170/debug.c @@ -54,7 +54,6 @@ struct carl9170_debugfs_fops { char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize, ssize_t *len); ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size); - const struct file_operations fops; enum carl9170_device_state req_dev_state; }; @@ -62,7 
+61,7 @@ struct carl9170_debugfs_fops { static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct carl9170_debugfs_fops *dfops; + const struct carl9170_debugfs_fops *dfops; struct ar9170 *ar; char *buf = NULL, *res_buf = NULL; ssize_t ret = 0; @@ -75,8 +74,7 @@ static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf, if (!ar) return -ENODEV; - dfops = container_of(debugfs_real_fops(file), - struct carl9170_debugfs_fops, fops); + dfops = debugfs_get_aux(file); if (!dfops->read) return -ENOSYS; @@ -113,7 +111,7 @@ out_free: static ssize_t carl9170_debugfs_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { - struct carl9170_debugfs_fops *dfops; + const struct carl9170_debugfs_fops *dfops; struct ar9170 *ar; char *buf = NULL; int err = 0; @@ -128,8 +126,7 @@ static ssize_t carl9170_debugfs_write(struct file *file, if (!ar) return -ENODEV; - dfops = container_of(debugfs_real_fops(file), - struct carl9170_debugfs_fops, fops); + dfops = debugfs_get_aux(file); if (!dfops->write) return -ENOSYS; @@ -165,6 +162,11 @@ out_free: return err; } +static struct debugfs_short_fops debugfs_fops = { + .read = carl9170_debugfs_read, + .write = carl9170_debugfs_write, +}; + #define __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \ _attr, _dstate) \ static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\ @@ -173,12 +175,6 @@ static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\ .write = _write, \ .attr = _attr, \ .req_dev_state = _dstate, \ - .fops = { \ - .open = simple_open, \ - .read = carl9170_debugfs_read, \ - .write = carl9170_debugfs_write, \ - .owner = THIS_MODULE \ - }, \ } #define DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, _attr) \ @@ -816,9 +812,9 @@ void carl9170_debugfs_register(struct ar9170 *ar) ar->hw->wiphy->debugfsdir); #define DEBUGFS_ADD(name) \ - debugfs_create_file(#name, carl_debugfs_##name ##_ops.attr, \ - ar->debug_dir, ar, \ - &carl_debugfs_##name ## _ops.fops) + debugfs_create_file_aux(#name, carl_debugfs_##name ##_ops.attr, \ + ar->debug_dir, ar, &carl_debugfs_##name ## _ops, \ + &debugfs_fops) DEBUGFS_ADD(usb_tx_anch_urbs); DEBUGFS_ADD(usb_rx_pool_urbs); diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c index efa98444e3fb..5a49970afc8c 100644 --- a/drivers/net/wireless/broadcom/b43/debugfs.c +++ b/drivers/net/wireless/broadcom/b43/debugfs.c @@ -30,7 +30,6 @@ static struct dentry *rootdir; struct b43_debugfs_fops { ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize); int (*write)(struct b43_wldev *dev, const char *buf, size_t count); - struct file_operations fops; /* Offset of struct b43_dfs_file in struct b43_dfsentry */ size_t file_struct_offset; }; @@ -491,7 +490,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct b43_wldev *dev; - struct b43_debugfs_fops *dfops; + const struct b43_debugfs_fops *dfops; struct b43_dfs_file *dfile; ssize_t ret; char *buf; @@ -511,8 +510,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf, goto out_unlock; } - dfops = container_of(debugfs_real_fops(file), - struct b43_debugfs_fops, fops); + dfops = debugfs_get_aux(file); if (!dfops->read) { err = -ENOSYS; goto out_unlock; @@ -555,7 +553,7 @@ static ssize_t b43_debugfs_write(struct file *file, size_t count, loff_t *ppos) { struct b43_wldev *dev; - struct b43_debugfs_fops 
*dfops; + const struct b43_debugfs_fops *dfops; char *buf; int err = 0; @@ -573,8 +571,7 @@ static ssize_t b43_debugfs_write(struct file *file, goto out_unlock; } - dfops = container_of(debugfs_real_fops(file), - struct b43_debugfs_fops, fops); + dfops = debugfs_get_aux(file); if (!dfops->write) { err = -ENOSYS; goto out_unlock; @@ -602,16 +599,16 @@ out_unlock: } +static struct debugfs_short_fops debugfs_ops = { + .read = b43_debugfs_read, + .write = b43_debugfs_write, + .llseek = generic_file_llseek, +}; + #define B43_DEBUGFS_FOPS(name, _read, _write) \ static struct b43_debugfs_fops fops_##name = { \ .read = _read, \ .write = _write, \ - .fops = { \ - .open = simple_open, \ - .read = b43_debugfs_read, \ - .write = b43_debugfs_write, \ - .llseek = generic_file_llseek, \ - }, \ .file_struct_offset = offsetof(struct b43_dfsentry, \ file_##name), \ } @@ -703,9 +700,9 @@ void b43_debugfs_add_device(struct b43_wldev *dev) #define ADD_FILE(name, mode) \ do { \ - debugfs_create_file(__stringify(name), \ + debugfs_create_file_aux(__stringify(name), \ mode, e->subdir, dev, \ - &fops_##name.fops); \ + &fops_##name, &debugfs_ops); \ } while (0) diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c index 6b0e8d117061..5d04bcc216e5 100644 --- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c +++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c @@ -31,7 +31,6 @@ static struct dentry *rootdir; struct b43legacy_debugfs_fops { ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize); int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count); - struct file_operations fops; /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */ size_t file_struct_offset; /* Take wl->irq_lock before calling read/write? */ @@ -188,7 +187,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct b43legacy_wldev *dev; - struct b43legacy_debugfs_fops *dfops; + const struct b43legacy_debugfs_fops *dfops; struct b43legacy_dfs_file *dfile; ssize_t ret; char *buf; @@ -208,8 +207,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf, goto out_unlock; } - dfops = container_of(debugfs_real_fops(file), - struct b43legacy_debugfs_fops, fops); + dfops = debugfs_get_aux(file); if (!dfops->read) { err = -ENOSYS; goto out_unlock; @@ -257,7 +255,7 @@ static ssize_t b43legacy_debugfs_write(struct file *file, size_t count, loff_t *ppos) { struct b43legacy_wldev *dev; - struct b43legacy_debugfs_fops *dfops; + const struct b43legacy_debugfs_fops *dfops; char *buf; int err = 0; @@ -275,8 +273,7 @@ static ssize_t b43legacy_debugfs_write(struct file *file, goto out_unlock; } - dfops = container_of(debugfs_real_fops(file), - struct b43legacy_debugfs_fops, fops); + dfops = debugfs_get_aux(file); if (!dfops->write) { err = -ENOSYS; goto out_unlock; @@ -308,17 +305,16 @@ out_unlock: return err ? 
err : count; } +static struct debugfs_short_fops debugfs_ops = { + .read = b43legacy_debugfs_read, + .write = b43legacy_debugfs_write, + .llseek = generic_file_llseek +}; #define B43legacy_DEBUGFS_FOPS(name, _read, _write, _take_irqlock) \ static struct b43legacy_debugfs_fops fops_##name = { \ .read = _read, \ .write = _write, \ - .fops = { \ - .open = simple_open, \ - .read = b43legacy_debugfs_read, \ - .write = b43legacy_debugfs_write, \ - .llseek = generic_file_llseek, \ - }, \ .file_struct_offset = offsetof(struct b43legacy_dfsentry, \ file_##name), \ .take_irqlock = _take_irqlock, \ @@ -386,9 +382,9 @@ void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev) #define ADD_FILE(name, mode) \ do { \ - debugfs_create_file(__stringify(name), mode, \ + debugfs_create_file_aux(__stringify(name), mode, \ e->subdir, dev, \ - &fops_##name.fops); \ + &fops_##name, &debugfs_ops); \ } while (0) diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 2237715e42eb..0ccf4a9e523a 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -1212,7 +1212,7 @@ enum nd_ioctl_mode { DIMM_IOCTL, }; -static int match_dimm(struct device *dev, void *data) +static int match_dimm(struct device *dev, const void *data) { long id = (long) data; diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 030dbde6b088..9e84ab411564 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -67,13 +67,6 @@ bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, return claimed; } -static int namespace_match(struct device *dev, void *data) -{ - char *name = data; - - return strcmp(name, dev_name(dev)) == 0; -} - static bool is_idle(struct device *dev, struct nd_namespace_common *ndns) { struct nd_region *nd_region = to_nd_region(dev->parent); @@ -168,7 +161,7 @@ ssize_t nd_namespace_store(struct device *dev, goto out; } - found = device_find_child(dev->parent, name, namespace_match); + found = device_find_child_by_name(dev->parent, name); if (!found) { dev_dbg(dev, "'%s' not found under %s\n", name, dev_name(dev->parent)); diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi index cd310b26b50c..4171f43cf01c 100644 --- a/drivers/of/unittest-data/tests-platform.dtsi +++ b/drivers/of/unittest-data/tests-platform.dtsi @@ -33,6 +33,11 @@ reg = <0x100>; }; }; + + test-device@2 { + compatible = "test,rust-device"; + reg = <0x2>; + }; }; platform-tests-2 { diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index 105de7c3274a..8fc6238b1728 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -217,7 +217,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev, { struct opp_device *new_dev = NULL, *iter; const struct device *dev; - struct dentry *dentry; + int err; /* Look for next opp-dev */ list_for_each_entry(iter, &opp_table->dev_list, node) @@ -234,16 +234,14 @@ static void opp_migrate_dentry(struct opp_device *opp_dev, opp_set_dev_name(dev, opp_table->dentry_name); - dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir, - opp_table->dentry_name); - if (IS_ERR(dentry)) { + err = debugfs_change_name(opp_dev->dentry, "%s", opp_table->dentry_name); + if (err) { dev_err(dev, "%s: Failed to rename link from: %s to %s\n", __func__, dev_name(opp_dev->dev), dev_name(dev)); return; } - new_dev->dentry = dentry; - opp_table->dentry = dentry; + new_dev->dentry = opp_table->dentry = opp_dev->dentry; } /** diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c index 
3644997a8342..24d4f3a3ec3d 100644 --- a/drivers/parport/parport_serial.c +++ b/drivers/parport/parport_serial.c @@ -266,10 +266,14 @@ static struct pci_device_id parport_serial_pci_tbl[] = { { 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c }, /* WCH CARDS */ - { 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p}, - { 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p}, - { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p}, - { 0x1c00, 0x3250, 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p}, + { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_1S1P, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p }, + { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_2S1P, + 0x4348, 0x3253, 0, 0, wch_ch353_2s1p }, + { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_0S1P, + 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p }, + { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_2S1P, + 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p }, /* BrainBoxes PX272/PX306 MIO card */ { PCI_VENDOR_ID_INTASHIELD, 0x4100, diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 3f7095ec5978..a496fbe3352b 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -381,17 +381,12 @@ static const char *const u3_phy_files[] = { static int u2_phy_params_show(struct seq_file *sf, void *unused) { struct mtk_phy_instance *inst = sf->private; - const char *fname = file_dentry(sf->file)->d_iname; struct u2phy_banks *u2_banks = &inst->u2_banks; void __iomem *com = u2_banks->com; u32 max = 0; u32 tmp = 0; u32 val = 0; - int ret; - - ret = match_string(u2_phy_files, ARRAY_SIZE(u2_phy_files), fname); - if (ret < 0) - return ret; + int ret = debugfs_get_aux_num(sf->file); switch (ret) { case U2P_EYE_VRT: @@ -438,7 +433,7 @@ static int u2_phy_params_show(struct seq_file *sf, void *unused) break; } - seq_printf(sf, "%s : %d [0, %d]\n", fname, val, max); + seq_printf(sf, "%s : %d [0, %d]\n", u2_phy_files[ret], val, max); return 0; } @@ -451,23 +446,18 @@ static int u2_phy_params_open(struct inode *inode, struct file *file) static ssize_t u2_phy_params_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { - const char *fname = file_dentry(file)->d_iname; struct seq_file *sf = file->private_data; struct mtk_phy_instance *inst = sf->private; struct u2phy_banks *u2_banks = &inst->u2_banks; void __iomem *com = u2_banks->com; ssize_t rc; u32 val; - int ret; + int ret = debugfs_get_aux_num(file); rc = kstrtouint_from_user(ubuf, USER_BUF_LEN(count), 0, &val); if (rc) return rc; - ret = match_string(u2_phy_files, ARRAY_SIZE(u2_phy_files), fname); - if (ret < 0) - return (ssize_t)ret; - switch (ret) { case U2P_EYE_VRT: mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL, val); @@ -516,23 +506,18 @@ static void u2_phy_dbgfs_files_create(struct mtk_phy_instance *inst) int i; for (i = 0; i < count; i++) - debugfs_create_file(u2_phy_files[i], 0644, inst->phy->debugfs, - inst, &u2_phy_fops); + debugfs_create_file_aux_num(u2_phy_files[i], 0644, inst->phy->debugfs, + inst, i, &u2_phy_fops); } static int u3_phy_params_show(struct seq_file *sf, void *unused) { struct mtk_phy_instance *inst = sf->private; - const char *fname = file_dentry(sf->file)->d_iname; struct u3phy_banks *u3_banks = &inst->u3_banks; u32 val = 0; u32 max = 0; u32 tmp; - int ret; - - ret = match_string(u3_phy_files, ARRAY_SIZE(u3_phy_files), fname); - if (ret < 0) - return ret; + int ret = debugfs_get_aux_num(sf->file); switch (ret) { case U3P_EFUSE_EN: @@ -564,7 +549,7 @@ static int 
u3_phy_params_show(struct seq_file *sf, void *unused) break; } - seq_printf(sf, "%s : %d [0, %d]\n", fname, val, max); + seq_printf(sf, "%s : %d [0, %d]\n", u3_phy_files[ret], val, max); return 0; } @@ -577,23 +562,18 @@ static int u3_phy_params_open(struct inode *inode, struct file *file) static ssize_t u3_phy_params_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { - const char *fname = file_dentry(file)->d_iname; struct seq_file *sf = file->private_data; struct mtk_phy_instance *inst = sf->private; struct u3phy_banks *u3_banks = &inst->u3_banks; void __iomem *phyd = u3_banks->phyd; ssize_t rc; u32 val; - int ret; + int ret = debugfs_get_aux_num(sf->file); rc = kstrtouint_from_user(ubuf, USER_BUF_LEN(count), 0, &val); if (rc) return rc; - ret = match_string(u3_phy_files, ARRAY_SIZE(u3_phy_files), fname); - if (ret < 0) - return (ssize_t)ret; - switch (ret) { case U3P_EFUSE_EN: mtk_phy_update_field(phyd + U3P_U3_PHYD_RSV, @@ -636,8 +616,8 @@ static void u3_phy_dbgfs_files_create(struct mtk_phy_instance *inst) int i; for (i = 0; i < count; i++) - debugfs_create_file(u3_phy_files[i], 0644, inst->phy->debugfs, - inst, &u3_phy_fops); + debugfs_create_file_aux_num(u3_phy_files[i], 0644, inst->phy->debugfs, + inst, i, &u3_phy_fops); } static int phy_type_show(struct seq_file *sf, void *unused) diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 99d0bc693315..ccd54c089bab 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -1285,7 +1285,7 @@ static int pwm_export_child(struct device *pwmchip_dev, struct pwm_device *pwm) return 0; } -static int pwm_unexport_match(struct device *pwm_dev, void *data) +static int pwm_unexport_match(struct device *pwm_dev, const void *data) { return pwm_from_dev(pwm_dev) == data; } diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index 712c06c02696..207b64c0a2fe 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -377,9 +377,9 @@ EXPORT_SYMBOL(rpmsg_get_mtu); * this is used to make sure we're not creating rpmsg devices for channels * that already exist. 
*/ -static int rpmsg_device_match(struct device *dev, void *data) +static int rpmsg_device_match(struct device *dev, const void *data) { - struct rpmsg_channel_info *chinfo = data; + const struct rpmsg_channel_info *chinfo = data; struct rpmsg_device *rpdev = to_rpmsg_device(dev); if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src) diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index cba2d048a96b..4a0b3f19bd8e 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -128,7 +128,7 @@ static int s390_vary_chpid(struct chp_id chpid, int on) * Channel measurement related functions */ static ssize_t measurement_chars_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct channel_path *chp; @@ -142,11 +142,11 @@ static ssize_t measurement_chars_read(struct file *filp, struct kobject *kobj, return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars, sizeof(chp->cmg_chars)); } -static BIN_ATTR_ADMIN_RO(measurement_chars, sizeof(struct cmg_chars)); +static const BIN_ATTR_ADMIN_RO(measurement_chars, sizeof(struct cmg_chars)); static ssize_t measurement_chars_full_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct channel_path *chp = to_channelpath(kobj_to_dev(kobj)); @@ -196,22 +196,22 @@ static ssize_t chp_measurement_copy_block(void *buf, loff_t off, size_t count, } static ssize_t measurement_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { return chp_measurement_copy_block(buf, off, count, kobj, false); } -static BIN_ATTR_ADMIN_RO(measurement, sizeof(struct cmg_entry)); +static const BIN_ATTR_ADMIN_RO(measurement, sizeof(struct cmg_entry)); static ssize_t ext_measurement_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { return chp_measurement_copy_block(buf, off, count, kobj, true); } -static BIN_ATTR_ADMIN_RO(ext_measurement, sizeof(struct cmg_ext_entry)); +static const BIN_ATTR_ADMIN_RO(ext_measurement, sizeof(struct cmg_ext_entry)); -static struct bin_attribute *measurement_attrs[] = { +static const struct bin_attribute *measurement_attrs[] = { &bin_attr_measurement_chars, &bin_attr_measurement_chars_full, &bin_attr_measurement, @@ -435,7 +435,7 @@ static ssize_t speed_bps_show(struct device *dev, static DEVICE_ATTR_RO(speed_bps); static ssize_t util_string_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct channel_path *chp = to_channelpath(kobj_to_dev(kobj)); @@ -448,10 +448,10 @@ static ssize_t util_string_read(struct file *filp, struct kobject *kobj, return rc; } -static BIN_ATTR_RO(util_string, - sizeof(((struct channel_path_desc_fmt3 *)0)->util_str)); +static const BIN_ATTR_RO(util_string, + sizeof(((struct channel_path_desc_fmt3 *)0)->util_str)); -static struct bin_attribute *chp_bin_attrs[] = { +static const struct bin_attribute *const chp_bin_attrs[] = { &bin_attr_util_string, NULL, }; @@ -468,9 +468,9 @@ static struct attribute *chp_attrs[] = { &dev_attr_speed_bps.attr, NULL, }; -static struct attribute_group chp_attr_group = { +static const struct attribute_group chp_attr_group = { .attrs 
= chp_attrs, - .bin_attrs = chp_bin_attrs, + .bin_attrs_new = chp_bin_attrs, }; static const struct attribute_group *chp_attr_groups[] = { &chp_attr_group, diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 062ec5f24758..6b0e6b4cd8af 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -7189,7 +7189,8 @@ exit_new_nt_list: * 1: if flashnode entry is non-persistent * 0: if flashnode entry is persistent **/ -static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) +static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, + const void *data) { struct iscsi_bus_flash_session *fnode_sess; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index fec8ffb8d653..9c347c64c315 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1324,7 +1324,7 @@ EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); * 1 on success * 0 on failure */ -static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) +static int iscsi_is_flashnode_conn_dev(struct device *dev, const void *data) { return dev->bus == &iscsi_flashnode_bus; } @@ -1335,7 +1335,7 @@ static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) return 0; } -static int flashnode_match_index(struct device *dev, void *data) +static int flashnode_match_index(struct device *dev, const void *data) { struct iscsi_bus_flash_session *fnode_sess = NULL; int ret = 0; @@ -1344,7 +1344,7 @@ static int flashnode_match_index(struct device *dev, void *data) goto exit_match_index; fnode_sess = iscsi_dev_to_flash_session(dev); - ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0; + ret = (fnode_sess->target_id == *((const int *)data)) ? 
1 : 0; exit_match_index: return ret; @@ -1389,8 +1389,8 @@ iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx) * %NULL on failure */ struct device * -iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, - int (*fn)(struct device *dev, void *data)) +iscsi_find_flashnode_sess(struct Scsi_Host *shost, const void *data, + device_match_t fn) { return device_find_child(&shost->shost_gendev, data, fn); } diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c index 65e5515f7555..005fa2ef100f 100644 --- a/drivers/slimbus/core.c +++ b/drivers/slimbus/core.c @@ -328,7 +328,8 @@ void slim_report_absent(struct slim_device *sbdev) } EXPORT_SYMBOL_GPL(slim_report_absent); -static bool slim_eaddr_equal(struct slim_eaddr *a, struct slim_eaddr *b) +static bool slim_eaddr_equal(const struct slim_eaddr *a, + const struct slim_eaddr *b) { return (a->manf_id == b->manf_id && a->prod_code == b->prod_code && @@ -336,9 +337,9 @@ static bool slim_eaddr_equal(struct slim_eaddr *a, struct slim_eaddr *b) a->instance == b->instance); } -static int slim_match_dev(struct device *dev, void *data) +static int slim_match_dev(struct device *dev, const void *data) { - struct slim_eaddr *e_addr = data; + const struct slim_eaddr *e_addr = data; struct slim_device *sbdev = to_slim_device(dev); return slim_eaddr_equal(&sbdev->e_addr, e_addr); @@ -384,21 +385,13 @@ struct slim_device *slim_get_device(struct slim_controller *ctrl, } EXPORT_SYMBOL_GPL(slim_get_device); -static int of_slim_match_dev(struct device *dev, void *data) -{ - struct device_node *np = data; - struct slim_device *sbdev = to_slim_device(dev); - - return (sbdev->dev.of_node == np); -} - static struct slim_device *of_find_slim_device(struct slim_controller *ctrl, struct device_node *np) { struct slim_device *sbdev; struct device *dev; - dev = device_find_child(ctrl->dev, np, of_slim_match_dev); + dev = device_find_child(ctrl->dev, np, device_match_of_node); if (dev) { sbdev = to_slim_device(dev); return sbdev; diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c index ca71023df447..5d80ace41d8e 100644 --- a/drivers/staging/greybus/camera.c +++ b/drivers/staging/greybus/camera.c @@ -1128,18 +1128,7 @@ done: static int gb_camera_debugfs_open(struct inode *inode, struct file *file) { - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) { - const struct gb_camera_debugfs_entry *entry = - &gb_camera_debugfs_entries[i]; - - if (!strcmp(file->f_path.dentry->d_iname, entry->name)) { - file->private_data = (void *)entry; - break; - } - } - + file->private_data = (void *)debugfs_get_aux(file); return 0; } @@ -1175,8 +1164,8 @@ static int gb_camera_debugfs_init(struct gb_camera *gcam) gcam->debugfs.buffers[i].length = 0; - debugfs_create_file(entry->name, entry->mask, - gcam->debugfs.root, gcam, + debugfs_create_file_aux(entry->name, entry->mask, + gcam->debugfs.root, gcam, entry, &gb_camera_debugfs_ops); } diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index eeb64433ebbc..1f25529fe05d 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -472,7 +472,7 @@ struct tb_retimer_lookup { u8 index; }; -static int retimer_match(struct device *dev, void *data) +static int retimer_match(struct device *dev, const void *data) { const struct tb_retimer_lookup *lookup = data; struct tb_retimer *rt = tb_to_retimer(dev); diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 11a50c86a1e4..b0630e6d9472 100644 --- 
a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -1026,7 +1026,7 @@ static int remove_missing_service(struct device *dev, void *data) return 0; } -static int find_service(struct device *dev, void *data) +static int find_service(struct device *dev, const void *data) { const struct tb_property *p = data; struct tb_service *svc; diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c index afbf7738c7c4..58b28be63c79 100644 --- a/drivers/tty/mips_ejtag_fdc.c +++ b/drivers/tty/mips_ejtag_fdc.c @@ -1154,7 +1154,7 @@ static char kgdbfdc_rbuf[4]; /* write buffer to allow compaction */ static unsigned int kgdbfdc_wbuflen; -static char kgdbfdc_wbuf[4]; +static u8 kgdbfdc_wbuf[4]; static void __iomem *kgdbfdc_setup(void) { @@ -1215,7 +1215,7 @@ static int kgdbfdc_read_char(void) /* push an FDC word from write buffer to TX FIFO */ static void kgdbfdc_push_one(void) { - const char *bufs[1] = { kgdbfdc_wbuf }; + const u8 *bufs[1] = { kgdbfdc_wbuf }; struct fdc_word word; void __iomem *regs; unsigned int i; diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 252849910588..363afe11974f 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2224,7 +2224,7 @@ static int gsm_dlci_negotiate(struct gsm_dlci *dlci) * * Some control dlci can stay in ADM mode with other dlci working just * fine. In that case we can just keep the control dlci open after the - * DLCI_OPENING retries time out. + * DLCI_OPENING receives DM. */ static void gsm_dlci_t1(struct timer_list *t) @@ -2243,16 +2243,19 @@ static void gsm_dlci_t1(struct timer_list *t) } break; case DLCI_OPENING: - if (dlci->retries) { - dlci->retries--; - gsm_command(dlci->gsm, dlci->addr, SABM|PF); - mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); - } else if (!dlci->addr && gsm->control == (DM | PF)) { + if (!dlci->addr && gsm->control == (DM | PF)) { if (debug & DBG_ERRORS) - pr_info("DLCI %d opening in ADM mode.\n", - dlci->addr); + pr_info("DLCI 0 opening in ADM mode.\n"); dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); + } else if (dlci->retries) { + if (!dlci->addr || !gsm->dlci[0] || + gsm->dlci[0]->state != DLCI_OPENING) { + dlci->retries--; + gsm_command(dlci->gsm, dlci->addr, SABM|PF); + } + + mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else { gsm->open_error++; gsm_dlci_begin_close(dlci); /* prevent half open link */ @@ -2308,7 +2311,9 @@ static void gsm_dlci_begin_open(struct gsm_dlci *dlci) dlci->retries = gsm->n2; if (!need_pn) { dlci->state = DLCI_OPENING; - gsm_command(gsm, dlci->addr, SABM|PF); + if (!dlci->addr || !gsm->dlci[0] || + gsm->dlci[0]->state != DLCI_OPENING) + gsm_command(gsm, dlci->addr, SABM|PF); } else { /* Configure DLCI before setup */ dlci->state = DLCI_CONFIGURE; @@ -4251,7 +4256,7 @@ static const struct tty_port_operations gsm_port_ops = { static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty) { struct gsm_mux *gsm; - struct gsm_dlci *dlci; + struct gsm_dlci *dlci, *dlci0; unsigned int line = tty->index; unsigned int mux = mux_line_to_num(line); bool alloc = false; @@ -4274,10 +4279,20 @@ static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty) perspective as we don't have to worry about this if DLCI0 is lost */ mutex_lock(&gsm->mutex); - if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) { + + dlci0 = gsm->dlci[0]; + if (dlci0 && dlci0->state != DLCI_OPEN) { mutex_unlock(&gsm->mutex); - return -EL2NSYNC; + + if (dlci0->state == DLCI_OPENING) + wait_event(gsm->event, dlci0->state != DLCI_OPENING); + + if 
(dlci0->state != DLCI_OPEN) + return -EL2NSYNC; + + mutex_lock(&gsm->mutex); } + dlci = gsm->dlci[line]; if (dlci == NULL) { alloc = true; diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index e5310c65cf52..11e05aa014e5 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h @@ -231,8 +231,8 @@ void serial8250_rpm_put_tx(struct uart_8250_port *p); int serial8250_em485_config(struct uart_port *port, struct ktermios *termios, struct serial_rs485 *rs485); -void serial8250_em485_start_tx(struct uart_8250_port *p); -void serial8250_em485_stop_tx(struct uart_8250_port *p); +void serial8250_em485_start_tx(struct uart_8250_port *p, bool toggle_ier); +void serial8250_em485_stop_tx(struct uart_8250_port *p, bool toggle_ier); void serial8250_em485_destroy(struct uart_8250_port *p); extern struct serial_rs485 serial8250_em485_supported; diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c index fdb53b54e99e..0609582a62f7 100644 --- a/drivers/tty/serial/8250/8250_bcm2835aux.c +++ b/drivers/tty/serial/8250/8250_bcm2835aux.c @@ -46,7 +46,7 @@ struct bcm2835aux_data { u32 cntl; }; -static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up) +static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up, bool toggle_ier) { if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) { struct bcm2835aux_data *data = dev_get_drvdata(up->port.dev); @@ -65,7 +65,7 @@ static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up) serial8250_out_MCR(up, UART_MCR_RTS); } -static void bcm2835aux_rs485_stop_tx(struct uart_8250_port *up) +static void bcm2835aux_rs485_stop_tx(struct uart_8250_port *up, bool toggle_ier) { if (up->port.rs485.flags & SER_RS485_RTS_AFTER_SEND) serial8250_out_MCR(up, 0); diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 68baf75bdadc..6f676bb37ac3 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -675,7 +675,6 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work) uart_port_lock_irqsave(port, &flags); up->ier |= UART_IER_RLSI | UART_IER_RDI; - up->port.read_status_mask |= UART_LSR_DR; serial_out(up, UART_IER, up->ier); uart_port_unlock_irqrestore(port, flags); } diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 9eb9aa766811..c2b75e3f106d 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -365,7 +365,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) if (up->port.rs485.flags & SER_RS485_ENABLED && up->port.rs485_config == serial8250_em485_config) - serial8250_em485_stop_tx(up); + serial8250_em485_stop_tx(up, true); } /* @@ -412,7 +412,13 @@ static void omap_8250_set_termios(struct uart_port *port, */ uart_update_timeout(port, termios->c_cflag, baud); - up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; + /* + * Specify which conditions may be considered for error + * handling and the ignoring of characters. The actual + * ignoring of characters only occurs if the bit is set + * in @ignore_status_mask as well. 
+ */ + up->port.read_status_mask = UART_LSR_OE | UART_LSR_DR; if (termios->c_iflag & INPCK) up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; if (termios->c_iflag & (IGNBRK | PARMRK)) @@ -838,7 +844,6 @@ static void omap_8250_unthrottle(struct uart_port *port) if (up->dma) up->dma->rx_dma(up); up->ier |= UART_IER_RLSI | UART_IER_RDI; - port->read_status_mask |= UART_LSR_DR; serial_out(up, UART_IER, up->ier); uart_port_unlock_irqrestore(port, flags); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 3c3f7c926afb..df4d0d832e54 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -64,23 +64,17 @@ #define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6 #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001 #define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d -#define PCI_VENDOR_ID_WCH 0x4348 -#define PCI_DEVICE_ID_WCH_CH352_2S 0x3253 -#define PCI_DEVICE_ID_WCH_CH353_4S 0x3453 -#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046 -#define PCI_DEVICE_ID_WCH_CH353_1S1P 0x5053 -#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053 -#define PCI_DEVICE_ID_WCH_CH355_4S 0x7173 + +#define PCI_DEVICE_ID_WCHCN_CH352_2S 0x3253 +#define PCI_DEVICE_ID_WCHCN_CH355_4S 0x7173 + #define PCI_VENDOR_ID_AGESTAR 0x5372 #define PCI_DEVICE_ID_AGESTAR_9375 0x6872 #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a #define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e -#define PCIE_VENDOR_ID_WCH 0x1c00 -#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 -#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 -#define PCIE_DEVICE_ID_WCH_CH384_8S 0x3853 -#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253 +#define PCI_DEVICE_ID_WCHIC_CH384_4S 0x3470 +#define PCI_DEVICE_ID_WCHIC_CH384_8S 0x3853 #define PCI_DEVICE_ID_MOXA_CP102E 0x1024 #define PCI_DEVICE_ID_MOXA_CP102EL 0x1025 @@ -2817,80 +2811,80 @@ static struct pci_serial_quirk pci_serial_quirks[] = { }, /* WCH CH353 1S1P card (16550 clone) */ { - .vendor = PCI_VENDOR_ID_WCH, - .device = PCI_DEVICE_ID_WCH_CH353_1S1P, + .vendor = PCI_VENDOR_ID_WCHCN, + .device = PCI_DEVICE_ID_WCHCN_CH353_1S1P, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .setup = pci_wch_ch353_setup, }, /* WCH CH353 2S1P card (16550 clone) */ { - .vendor = PCI_VENDOR_ID_WCH, - .device = PCI_DEVICE_ID_WCH_CH353_2S1P, + .vendor = PCI_VENDOR_ID_WCHCN, + .device = PCI_DEVICE_ID_WCHCN_CH353_2S1P, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .setup = pci_wch_ch353_setup, }, /* WCH CH353 4S card (16550 clone) */ { - .vendor = PCI_VENDOR_ID_WCH, - .device = PCI_DEVICE_ID_WCH_CH353_4S, + .vendor = PCI_VENDOR_ID_WCHCN, + .device = PCI_DEVICE_ID_WCHCN_CH353_4S, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .setup = pci_wch_ch353_setup, }, /* WCH CH353 2S1PF card (16550 clone) */ { - .vendor = PCI_VENDOR_ID_WCH, - .device = PCI_DEVICE_ID_WCH_CH353_2S1PF, + .vendor = PCI_VENDOR_ID_WCHCN, + .device = PCI_DEVICE_ID_WCHCN_CH353_2S1PF, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .setup = pci_wch_ch353_setup, }, /* WCH CH352 2S card (16550 clone) */ { - .vendor = PCI_VENDOR_ID_WCH, - .device = PCI_DEVICE_ID_WCH_CH352_2S, + .vendor = PCI_VENDOR_ID_WCHCN, + .device = PCI_DEVICE_ID_WCHCN_CH352_2S, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .setup = pci_wch_ch353_setup, }, /* WCH CH355 4S card (16550 clone) */ { - .vendor = PCI_VENDOR_ID_WCH, - .device = PCI_DEVICE_ID_WCH_CH355_4S, + .vendor = PCI_VENDOR_ID_WCHCN, + .device = PCI_DEVICE_ID_WCHCN_CH355_4S, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .setup = pci_wch_ch355_setup, }, /* WCH CH382 2S card (16850 clone) */ { - .vendor = 
PCIE_VENDOR_ID_WCH, - .device = PCIE_DEVICE_ID_WCH_CH382_2S, + .vendor = PCI_VENDOR_ID_WCHIC, + .device = PCI_DEVICE_ID_WCHIC_CH382_2S, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .setup = pci_wch_ch38x_setup, }, /* WCH CH382 2S1P card (16850 clone) */ { - .vendor = PCIE_VENDOR_ID_WCH, - .device = PCIE_DEVICE_ID_WCH_CH382_2S1P, + .vendor = PCI_VENDOR_ID_WCHIC, + .device = PCI_DEVICE_ID_WCHIC_CH382_2S1P, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .setup = pci_wch_ch38x_setup, }, /* WCH CH384 4S card (16850 clone) */ { - .vendor = PCIE_VENDOR_ID_WCH, - .device = PCIE_DEVICE_ID_WCH_CH384_4S, + .vendor = PCI_VENDOR_ID_WCHIC, + .device = PCI_DEVICE_ID_WCHIC_CH384_4S, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .setup = pci_wch_ch38x_setup, }, /* WCH CH384 8S card (16850 clone) */ { - .vendor = PCIE_VENDOR_ID_WCH, - .device = PCIE_DEVICE_ID_WCH_CH384_8S, + .vendor = PCI_VENDOR_ID_WCHIC, + .device = PCI_DEVICE_ID_WCHIC_CH384_8S, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .init = pci_wch_ch38x_init, @@ -3967,11 +3961,11 @@ static const struct pci_device_id blacklist[] = { /* multi-io cards handled by parport_serial */ /* WCH CH353 2S1P */ - { PCI_DEVICE(0x4348, 0x7053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), }, + { PCI_VDEVICE(WCHCN, 0x7053), REPORT_CONFIG(PARPORT_SERIAL), }, /* WCH CH353 1S1P */ - { PCI_DEVICE(0x4348, 0x5053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), }, + { PCI_VDEVICE(WCHCN, 0x5053), REPORT_CONFIG(PARPORT_SERIAL), }, /* WCH CH382 2S1P */ - { PCI_DEVICE(0x1c00, 0x3250), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), }, + { PCI_VDEVICE(WCHIC, 0x3250), REPORT_CONFIG(PARPORT_SERIAL), }, /* Intel platforms with MID UART */ { PCI_VDEVICE(INTEL, 0x081b), REPORT_8250_CONFIG(MID), }, @@ -6044,27 +6038,27 @@ static const struct pci_device_id serial_pci_tbl[] = { * WCH CH353 series devices: The 2S1P is handled by parport_serial * so not listed here. 
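 * (They appear instead in the blacklist[] table above, together with the
 * CH353 1S1P and CH382 2S1P, all tagged REPORT_CONFIG(PARPORT_SERIAL).)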
*/ - { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_4S, + { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_4S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_4_115200 }, - { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_2S1PF, + { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_2S1PF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, - { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH355_4S, + { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH355_4S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_4_115200 }, - { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S, + { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_2S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_wch382_2 }, - { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, + { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH384_4S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_wch384_4 }, - { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_8S, + { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH384_8S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_wch384_8 }, /* diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c index 838f181f929b..e9c51d4e447d 100644 --- a/drivers/tty/serial/8250/8250_pci1xxxx.c +++ b/drivers/tty/serial/8250/8250_pci1xxxx.c @@ -78,6 +78,12 @@ #define UART_TX_BYTE_FIFO 0x00 #define UART_FIFO_CTL 0x02 +#define UART_MODEM_CTL_REG 0x04 +#define UART_MODEM_CTL_RTS_SET BIT(1) + +#define UART_LINE_STAT_REG 0x05 +#define UART_LINE_XMIT_CHECK_MASK GENMASK(6, 5) + #define UART_ACTV_REG 0x11 #define UART_BLOCK_SET_ACTIVE BIT(0) @@ -94,6 +100,7 @@ #define UART_BIT_SAMPLE_CNT_16 16 #define BAUD_CLOCK_DIV_INT_MSK GENMASK(31, 8) #define ADCL_CFG_RTS_DELAY_MASK GENMASK(11, 8) +#define FRAC_DIV_TX_END_POINT_MASK GENMASK(23, 20) #define UART_WAKE_REG 0x8C #define UART_WAKE_MASK_REG 0x90 @@ -134,6 +141,11 @@ #define UART_BST_STAT_LSR_FRAME_ERR 0x8000000 #define UART_BST_STAT_LSR_THRE 0x20000000 +#define GET_MODEM_CTL_RTS_STATUS(reg) ((reg) & UART_MODEM_CTL_RTS_SET) +#define GET_RTS_PIN_STATUS(val) (((val) & TIOCM_RTS) >> 1) +#define RTS_TOGGLE_STATUS_MASK(val, reg) (GET_MODEM_CTL_RTS_STATUS(reg) \ + != GET_RTS_PIN_STATUS(val)) + struct pci1xxxx_8250 { unsigned int nr; u8 dev_rev; @@ -254,6 +266,47 @@ static void pci1xxxx_set_divisor(struct uart_port *port, unsigned int baud, port->membase + UART_BAUD_CLK_DIVISOR_REG); } +static void pci1xxxx_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + u32 fract_div_cfg_reg; + u32 line_stat_reg; + u32 modem_ctl_reg; + u32 adcl_cfg_reg; + + adcl_cfg_reg = readl(port->membase + ADCL_CFG_REG); + + /* HW is responsible in ADCL_EN case */ + if ((adcl_cfg_reg & (ADCL_CFG_EN | ADCL_CFG_PIN_SEL))) + return; + + modem_ctl_reg = readl(port->membase + UART_MODEM_CTL_REG); + + serial8250_do_set_mctrl(port, mctrl); + + if (RTS_TOGGLE_STATUS_MASK(mctrl, modem_ctl_reg)) { + line_stat_reg = readl(port->membase + UART_LINE_STAT_REG); + if (line_stat_reg & UART_LINE_XMIT_CHECK_MASK) { + fract_div_cfg_reg = readl(port->membase + + FRAC_DIV_CFG_REG); + + writel((fract_div_cfg_reg & + ~(FRAC_DIV_TX_END_POINT_MASK)), + port->membase + FRAC_DIV_CFG_REG); + + /* Enable ADC and set the nRTS pin */ + writel((adcl_cfg_reg | (ADCL_CFG_EN | + ADCL_CFG_PIN_SEL)), + port->membase + ADCL_CFG_REG); + + /* Revert to the original settings */ + writel(adcl_cfg_reg, port->membase + ADCL_CFG_REG); + + writel(fract_div_cfg_reg, port->membase + + FRAC_DIV_CFG_REG); + } + } +} + static int pci1xxxx_rs485_config(struct uart_port *port, struct ktermios *termios, struct serial_rs485 *rs485) @@ -631,9 +684,14 @@ static int pci1xxxx_setup(struct pci_dev *pdev, 
port->port.rs485_config = pci1xxxx_rs485_config; port->port.rs485_supported = pci1xxxx_rs485_supported; - /* From C0 rev Burst operation is supported */ + /* + * C0 and later revisions support Burst operation. + * RTS workaround in mctrl is applicable only to B0. + */ if (rev >= 0xC0) port->port.handle_irq = pci1xxxx_handle_irq; + else if (rev == 0xB0) + port->port.set_mctrl = pci1xxxx_set_mctrl; ret = serial8250_pci_setup_port(pdev, port, 0, PORT_OFFSET * port_idx, 0); if (ret < 0) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 649e74e9b52f..d7976a21cca9 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -578,7 +578,7 @@ static int serial8250_em485_init(struct uart_8250_port *p) deassert_rts: if (p->em485->tx_stopped) - p->rs485_stop_tx(p); + p->rs485_stop_tx(p, true); return 0; } @@ -1390,7 +1390,6 @@ static void serial8250_stop_rx(struct uart_port *port) serial8250_rpm_get(up); up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); - up->port.read_status_mask &= ~UART_LSR_DR; serial_port_out(port, UART_IER, up->ier); serial8250_rpm_put(up); @@ -1399,10 +1398,11 @@ static void serial8250_stop_rx(struct uart_port *port) /** * serial8250_em485_stop_tx() - generic ->rs485_stop_tx() callback * @p: uart 8250 port + * @toggle_ier: true to allow enabling receive interrupts * * Generic callback usable by 8250 uart drivers to stop rs485 transmission. */ -void serial8250_em485_stop_tx(struct uart_8250_port *p) +void serial8250_em485_stop_tx(struct uart_8250_port *p, bool toggle_ier) { unsigned char mcr = serial8250_in_MCR(p); @@ -1423,8 +1423,10 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p) if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) { serial8250_clear_and_reinit_fifos(p); - p->ier |= UART_IER_RLSI | UART_IER_RDI; - serial_port_out(&p->port, UART_IER, p->ier); + if (toggle_ier) { + p->ier |= UART_IER_RLSI | UART_IER_RDI; + serial_port_out(&p->port, UART_IER, p->ier); + } } } EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx); @@ -1439,7 +1441,7 @@ static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t) serial8250_rpm_get(p); uart_port_lock_irqsave(&p->port, &flags); if (em485->active_timer == &em485->stop_tx_timer) { - p->rs485_stop_tx(p); + p->rs485_stop_tx(p, true); em485->active_timer = NULL; em485->tx_stopped = true; } @@ -1471,7 +1473,7 @@ static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay) em485->active_timer = &em485->stop_tx_timer; hrtimer_start(&em485->stop_tx_timer, ns_to_ktime(stop_delay), HRTIMER_MODE_REL); } else { - p->rs485_stop_tx(p); + p->rs485_stop_tx(p, true); em485->active_timer = NULL; em485->tx_stopped = true; } @@ -1560,6 +1562,7 @@ static inline void __start_tx(struct uart_port *port) /** * serial8250_em485_start_tx() - generic ->rs485_start_tx() callback * @up: uart 8250 port + * @toggle_ier: true to allow disabling receive interrupts * * Generic callback usable by 8250 uart drivers to start rs485 transmission. * Assumes that setting the RTS bit in the MCR register means RTS is high. @@ -1567,11 +1570,11 @@ static inline void __start_tx(struct uart_port *port) * stoppable by disabling the UART_IER_RDI interrupt. (Some chips set the * UART_LSR_DR bit even when UART_IER_RDI is disabled, foiling this approach.) 
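 *
 * Callers that must not disturb the receiver, such as the console write
 * path, pass @toggle_ier as false so the receive interrupt state is left
 * untouched.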
*/ -void serial8250_em485_start_tx(struct uart_8250_port *up) +void serial8250_em485_start_tx(struct uart_8250_port *up, bool toggle_ier) { unsigned char mcr = serial8250_in_MCR(up); - if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) + if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX) && toggle_ier) serial8250_stop_rx(&up->port); if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND) @@ -1605,7 +1608,7 @@ static bool start_tx_rs485(struct uart_port *port) if (em485->tx_stopped) { em485->tx_stopped = false; - up->rs485_start_tx(up); + up->rs485_start_tx(up, true); if (up->port.rs485.delay_rts_before_send > 0) { em485->active_timer = &em485->start_tx_timer; @@ -1931,7 +1934,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) */ if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) && (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) && - !(port->read_status_mask & UART_LSR_DR)) + !(up->ier & (UART_IER_RLSI | UART_IER_RDI))) skip_rx = true; if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { @@ -2079,11 +2082,20 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) serial8250_rpm_put(up); } -static void wait_for_lsr(struct uart_8250_port *up, int bits) +/* Returns true if @bits were set, false on timeout */ +static bool wait_for_lsr(struct uart_8250_port *up, int bits) { - unsigned int status, tmout = 10000; + unsigned int status, tmout; + + /* + * Wait for a character to be sent. Fallback to a safe default + * timeout value if @frame_time is not available. + */ + if (up->port.frame_time) + tmout = up->port.frame_time * 2 / NSEC_PER_USEC; + else + tmout = 10000; - /* Wait up to 10ms for the character(s) to be sent. */ for (;;) { status = serial_lsr_in(up); @@ -2094,11 +2106,11 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits) udelay(1); touch_nmi_watchdog(); } + + return (tmout != 0); } -/* - * Wait for transmitter & holding register to empty - */ +/* Wait for transmitter and holding register to empty with timeout */ static void wait_for_xmitr(struct uart_8250_port *up, int bits) { unsigned int tmout; @@ -2786,7 +2798,13 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, */ uart_update_timeout(port, termios->c_cflag, baud); - port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; + /* + * Specify which conditions may be considered for error + * handling and the ignoring of characters. The actual + * ignoring of characters only occurs if the bit is set + * in @ignore_status_mask as well. 
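+ * For example, INPCK below adds UART_LSR_FE and UART_LSR_PE so framing
+ * and parity errors are reported, while IGNPAR additionally sets them in
+ * @ignore_status_mask so the affected characters are dropped silently.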
+ */ + port->read_status_mask = UART_LSR_OE | UART_LSR_DR; if (termios->c_iflag & INPCK) port->read_status_mask |= UART_LSR_FE | UART_LSR_PE; if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) @@ -3250,7 +3268,7 @@ void serial8250_init_port(struct uart_8250_port *up) port->ops = &serial8250_pops; port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); - up->cur_iotype = 0xFF; + up->cur_iotype = UPIO_UNKNOWN; } EXPORT_SYMBOL_GPL(serial8250_init_port); @@ -3285,10 +3303,15 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults); static void serial8250_console_putchar(struct uart_port *port, unsigned char ch) { + serial_port_out(port, UART_TX, ch); +} + +static void serial8250_console_wait_putchar(struct uart_port *port, unsigned char ch) +{ struct uart_8250_port *up = up_to_u8250p(port); wait_for_xmitr(up, UART_LSR_THRE); - serial_port_out(port, UART_TX, ch); + serial8250_console_putchar(port, ch); } /* @@ -3317,6 +3340,16 @@ static void serial8250_console_restore(struct uart_8250_port *up) serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); } +static void fifo_wait_for_lsr(struct uart_8250_port *up, unsigned int count) +{ + unsigned int i; + + for (i = 0; i < count; i++) { + if (wait_for_lsr(up, UART_LSR_THRE)) + return; + } +} + /* * Print a string to the serial port using the device FIFO * @@ -3326,24 +3359,34 @@ static void serial8250_console_restore(struct uart_8250_port *up) static void serial8250_console_fifo_write(struct uart_8250_port *up, const char *s, unsigned int count) { - int i; const char *end = s + count; unsigned int fifosize = up->tx_loadsz; + struct uart_port *port = &up->port; + unsigned int tx_count = 0; bool cr_sent = false; + unsigned int i; while (s != end) { - wait_for_lsr(up, UART_LSR_THRE); + /* Allow timeout for each byte of a possibly full FIFO */ + fifo_wait_for_lsr(up, fifosize); for (i = 0; i < fifosize && s != end; ++i) { if (*s == '\n' && !cr_sent) { - serial_out(up, UART_TX, '\r'); + serial8250_console_putchar(port, '\r'); cr_sent = true; } else { - serial_out(up, UART_TX, *s++); + serial8250_console_putchar(port, *s++); cr_sent = false; } } + tx_count = i; } + + /* + * Allow timeout for each byte written since the caller will only wait + * for UART_LSR_BOTH_EMPTY using the timeout of a single character + */ + fifo_wait_for_lsr(up, tx_count); } /* @@ -3385,7 +3428,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (em485) { if (em485->tx_stopped) - up->rs485_start_tx(up); + up->rs485_start_tx(up, false); mdelay(port->rs485.delay_rts_before_send); } @@ -3412,7 +3455,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (likely(use_fifo)) serial8250_console_fifo_write(up, s, count); else - uart_console_write(port, s, count, serial8250_console_putchar); + uart_console_write(port, s, count, serial8250_console_wait_putchar); /* * Finally, wait for transmitter to become empty @@ -3423,7 +3466,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (em485) { mdelay(port->rs485.delay_rts_after_send); if (em485->tx_stopped) - up->rs485_stop_tx(up); + up->rs485_stop_tx(up, false); } serial_port_out(port, UART_IER, ier); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 45f0f779fbf9..976dae3bb1bb 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -128,7 +128,7 @@ config SERIAL_SB1250_DUART_CONSOLE config SERIAL_ATMEL bool "AT91 on-chip serial port support" depends on COMMON_CLK - depends on ARCH_AT91 || COMPILE_TEST + depends on 
ARCH_AT91 || ARCH_LAN969X || COMPILE_TEST select SERIAL_CORE select SERIAL_MCTRL_GPIO if GPIOLIB select MFD_AT91_USART diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c index b9c3c3bed0c1..d47a62d1c9f7 100644 --- a/drivers/tty/serial/altera_jtaguart.c +++ b/drivers/tty/serial/altera_jtaguart.c @@ -24,8 +24,6 @@ #include <linux/io.h> #include <linux/altera_jtaguart.h> -#define DRV_NAME "altera_jtaguart" - /* * Altera JTAG UART register definitions according to the Altera JTAG UART * datasheet: https://www.altera.com/literature/hb/nios2/n2cpu_nii51009.pdf @@ -173,7 +171,7 @@ static int altera_jtaguart_startup(struct uart_port *port) int ret; ret = request_irq(port->irq, altera_jtaguart_interrupt, 0, - DRV_NAME, port); + dev_name(port->dev), port); if (ret) { dev_err(port->dev, "unable to attach Altera JTAG UART %d interrupt vector=%d\n", port->line, port->irq); @@ -365,7 +363,7 @@ OF_EARLYCON_DECLARE(juart, "altr,juart-1.0", altera_jtaguart_earlycon_setup); static struct uart_driver altera_jtaguart_driver = { .owner = THIS_MODULE, - .driver_name = "altera_jtaguart", + .driver_name = KBUILD_MODNAME, .dev_name = "ttyJ", .major = ALTERA_JTAGUART_MAJOR, .minor = ALTERA_JTAGUART_MINOR, @@ -451,7 +449,7 @@ static struct platform_driver altera_jtaguart_platform_driver = { .probe = altera_jtaguart_probe, .remove = altera_jtaguart_remove, .driver = { - .name = DRV_NAME, + .name = KBUILD_MODNAME, .of_match_table = of_match_ptr(altera_jtaguart_match), }, }; @@ -481,4 +479,4 @@ module_exit(altera_jtaguart_exit); MODULE_DESCRIPTION("Altera JTAG UART driver"); MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRV_NAME); +MODULE_ALIAS("platform:" KBUILD_MODNAME); diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index c94655453c33..1759137121cc 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c @@ -24,7 +24,6 @@ #include <linux/io.h> #include <linux/altera_uart.h> -#define DRV_NAME "altera_uart" #define SERIAL_ALTERA_MAJOR 204 #define SERIAL_ALTERA_MINOR 213 @@ -518,7 +517,7 @@ OF_EARLYCON_DECLARE(uart, "altr,uart-1.0", altera_uart_earlycon_setup); */ static struct uart_driver altera_uart_driver = { .owner = THIS_MODULE, - .driver_name = DRV_NAME, + .driver_name = KBUILD_MODNAME, .dev_name = "ttyAL", .major = SERIAL_ALTERA_MAJOR, .minor = SERIAL_ALTERA_MINOR, @@ -619,7 +618,7 @@ static struct platform_driver altera_uart_platform_driver = { .probe = altera_uart_probe, .remove = altera_uart_remove, .driver = { - .name = DRV_NAME, + .name = KBUILD_MODNAME, .of_match_table = of_match_ptr(altera_uart_match), }, }; @@ -649,5 +648,5 @@ module_exit(altera_uart_exit); MODULE_DESCRIPTION("Altera UART driver"); MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRV_NAME); +MODULE_ALIAS("platform:" KBUILD_MODNAME); MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_ALTERA_MAJOR); diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 69b7a3e1e418..04212c823a91 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -248,6 +248,13 @@ struct pl011_dmatx_data { bool queued; }; +enum pl011_rs485_tx_state { + OFF, + WAIT_AFTER_RTS, + SEND, + WAIT_AFTER_SEND, +}; + /* * We wrap our port structure around the generic uart_port. 
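 *
 * For RS485, rs485_tx_state below tracks the transmit state machine
 * OFF -> WAIT_AFTER_RTS -> SEND -> WAIT_AFTER_SEND -> OFF, with the
 * WAIT states driven by the trigger_start_tx/trigger_stop_tx hrtimers
 * instead of the blocking mdelay()/udelay() calls used before.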
*/ @@ -261,8 +268,10 @@ struct uart_amba_port { unsigned int fifosize; /* vendor-specific */ unsigned int fixed_baud; /* vendor-set fixed baud rate */ char type[12]; - bool rs485_tx_started; - unsigned int rs485_tx_drain_interval; /* usecs */ + ktime_t rs485_tx_drain_interval; /* nano */ + enum pl011_rs485_tx_state rs485_tx_state; + struct hrtimer trigger_start_tx; + struct hrtimer trigger_stop_tx; #ifdef CONFIG_DMA_ENGINE /* DMA stuff */ unsigned int dmacr; /* dma control reg */ @@ -1260,30 +1269,31 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) static void pl011_rs485_tx_stop(struct uart_amba_port *uap) { - /* - * To be on the safe side only time out after twice as many iterations - * as fifo size. - */ - const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2; struct uart_port *port = &uap->port; - int i = 0; u32 cr; - /* Wait until hardware tx queue is empty */ - while (!pl011_tx_empty(port)) { - if (i > MAX_TX_DRAIN_ITERS) { - dev_warn(port->dev, - "timeout while draining hardware tx queue\n"); - break; - } + if (uap->rs485_tx_state == SEND) + uap->rs485_tx_state = WAIT_AFTER_SEND; - udelay(uap->rs485_tx_drain_interval); - i++; + if (uap->rs485_tx_state == WAIT_AFTER_SEND) { + /* Schedule hrtimer if tx queue not empty */ + if (!pl011_tx_empty(port)) { + hrtimer_start(&uap->trigger_stop_tx, + uap->rs485_tx_drain_interval, + HRTIMER_MODE_REL); + return; + } + if (port->rs485.delay_rts_after_send > 0) { + hrtimer_start(&uap->trigger_stop_tx, + ms_to_ktime(port->rs485.delay_rts_after_send), + HRTIMER_MODE_REL); + return; + } + /* Continue without any delay */ + } else if (uap->rs485_tx_state == WAIT_AFTER_RTS) { + hrtimer_try_to_cancel(&uap->trigger_start_tx); } - if (port->rs485.delay_rts_after_send) - mdelay(port->rs485.delay_rts_after_send); - cr = pl011_read(uap, REG_CR); if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) @@ -1296,7 +1306,7 @@ static void pl011_rs485_tx_stop(struct uart_amba_port *uap) cr |= UART011_CR_RXE; pl011_write(cr, uap, REG_CR); - uap->rs485_tx_started = false; + uap->rs485_tx_state = OFF; } static void pl011_stop_tx(struct uart_port *port) @@ -1304,11 +1314,18 @@ static void pl011_stop_tx(struct uart_port *port) struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); + if (port->rs485.flags & SER_RS485_ENABLED && + uap->rs485_tx_state == WAIT_AFTER_RTS) { + pl011_rs485_tx_stop(uap); + return; + } + uap->im &= ~UART011_TXIM; pl011_write(uap->im, uap, REG_IMSC); pl011_dma_tx_stop(uap); - if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) + if (port->rs485.flags & SER_RS485_ENABLED && + uap->rs485_tx_state != OFF) pl011_rs485_tx_stop(uap); } @@ -1328,10 +1345,19 @@ static void pl011_rs485_tx_start(struct uart_amba_port *uap) struct uart_port *port = &uap->port; u32 cr; + if (uap->rs485_tx_state == WAIT_AFTER_RTS) { + uap->rs485_tx_state = SEND; + return; + } + if (uap->rs485_tx_state == WAIT_AFTER_SEND) { + hrtimer_try_to_cancel(&uap->trigger_stop_tx); + uap->rs485_tx_state = SEND; + return; + } + /* uap->rs485_tx_state == OFF */ /* Enable transmitter */ cr = pl011_read(uap, REG_CR); cr |= UART011_CR_TXE; - /* Disable receiver if half-duplex */ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) cr &= ~UART011_CR_RXE; @@ -1343,10 +1369,14 @@ static void pl011_rs485_tx_start(struct uart_amba_port *uap) pl011_write(cr, uap, REG_CR); - if (port->rs485.delay_rts_before_send) - mdelay(port->rs485.delay_rts_before_send); - - uap->rs485_tx_started = true; + if (port->rs485.delay_rts_before_send > 0) { + 
uap->rs485_tx_state = WAIT_AFTER_RTS; + hrtimer_start(&uap->trigger_start_tx, + ms_to_ktime(port->rs485.delay_rts_before_send), + HRTIMER_MODE_REL); + } else { + uap->rs485_tx_state = SEND; + } } static void pl011_start_tx(struct uart_port *port) @@ -1355,13 +1385,44 @@ static void pl011_start_tx(struct uart_port *port) container_of(port, struct uart_amba_port, port); if ((uap->port.rs485.flags & SER_RS485_ENABLED) && - !uap->rs485_tx_started) + uap->rs485_tx_state != SEND) { pl011_rs485_tx_start(uap); + if (uap->rs485_tx_state == WAIT_AFTER_RTS) + return; + } if (!pl011_dma_tx_start(uap)) pl011_start_tx_pio(uap); } +static enum hrtimer_restart pl011_trigger_start_tx(struct hrtimer *t) +{ + struct uart_amba_port *uap = + container_of(t, struct uart_amba_port, trigger_start_tx); + unsigned long flags; + + uart_port_lock_irqsave(&uap->port, &flags); + if (uap->rs485_tx_state == WAIT_AFTER_RTS) + pl011_start_tx(&uap->port); + uart_port_unlock_irqrestore(&uap->port, flags); + + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart pl011_trigger_stop_tx(struct hrtimer *t) +{ + struct uart_amba_port *uap = + container_of(t, struct uart_amba_port, trigger_stop_tx); + unsigned long flags; + + uart_port_lock_irqsave(&uap->port, &flags); + if (uap->rs485_tx_state == WAIT_AFTER_SEND) + pl011_rs485_tx_stop(uap); + uart_port_unlock_irqrestore(&uap->port, flags); + + return HRTIMER_NORESTART; +} + static void pl011_stop_rx(struct uart_port *port) { struct uart_amba_port *uap = @@ -1953,7 +2014,7 @@ static void pl011_shutdown(struct uart_port *port) pl011_dma_shutdown(uap); - if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) + if ((port->rs485.flags & SER_RS485_ENABLED && uap->rs485_tx_state != OFF)) pl011_rs485_tx_stop(uap); free_irq(uap->port.irq, uap); @@ -2098,7 +2159,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, * with the given baud rate. We use this as the poll interval when we * wait for the tx queue to empty. 
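 *
 * One character occupies bits / baud seconds on the wire, hence the
 * bits * NSEC_PER_SEC / baud computation below; at 9600 baud with an
 * 8N1 frame (10 bits) this works out to roughly 1.04 ms per character.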
*/ - uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud); + uap->rs485_tx_drain_interval = ns_to_ktime(DIV_ROUND_UP(bits * NSEC_PER_SEC, baud)); pl011_setup_status_masks(port, termios); @@ -2807,6 +2868,11 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) } } + hrtimer_init(&uap->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&uap->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + uap->trigger_start_tx.function = pl011_trigger_start_tx; + uap->trigger_stop_tx.function = pl011_trigger_stop_tx; + ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr); if (ret) return ret; diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 0cf05ac18993..f44f9d20a974 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1727,26 +1727,16 @@ static void atmel_init_property(struct atmel_uart_port *atmel_port, /* DMA/PDC usage specification */ if (of_property_read_bool(np, "atmel,use-dma-rx")) { - if (of_property_read_bool(np, "dmas")) { - atmel_port->use_dma_rx = true; - atmel_port->use_pdc_rx = false; - } else { - atmel_port->use_dma_rx = false; - atmel_port->use_pdc_rx = true; - } + atmel_port->use_dma_rx = of_property_present(np, "dmas"); + atmel_port->use_pdc_rx = !atmel_port->use_dma_rx; } else { atmel_port->use_dma_rx = false; atmel_port->use_pdc_rx = false; } if (of_property_read_bool(np, "atmel,use-dma-tx")) { - if (of_property_read_bool(np, "dmas")) { - atmel_port->use_dma_tx = true; - atmel_port->use_pdc_tx = false; - } else { - atmel_port->use_dma_tx = false; - atmel_port->use_pdc_tx = true; - } + atmel_port->use_dma_tx = of_property_present(np, "dmas"); + atmel_port->use_pdc_tx = !atmel_port->use_dma_tx; } else { atmel_port->use_dma_tx = false; atmel_port->use_pdc_tx = false; diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 57b0632a3db6..c91b9d9818cd 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -245,7 +245,7 @@ #define DRIVER_NAME "fsl-lpuart" #define DEV_NAME "ttyLP" -#define UART_NR 8 +#define UART_NR 12 /* IMX lpuart has four extra unused regs located at the beginning */ #define IMX_REG_OFF 0x10 @@ -1965,6 +1965,11 @@ static void lpuart32_shutdown(struct uart_port *port) UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK); lpuart32_write(port, temp, UARTCTRL); + /* flush Rx/Tx FIFO */ + temp = lpuart32_read(port, UARTFIFO); + temp |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH; + lpuart32_write(port, temp, UARTFIFO); + uart_port_unlock_irqrestore(port, flags); lpuart_dma_shutdown(sport); diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c index e93850f6447a..2833708e369f 100644 --- a/drivers/tty/serial/kgdb_nmi.c +++ b/drivers/tty/serial/kgdb_nmi.c @@ -27,18 +27,6 @@ #include <linux/kgdb.h> #include <linux/kdb.h> -static int kgdb_nmi_knock = 1; -module_param_named(knock, kgdb_nmi_knock, int, 0600); -MODULE_PARM_DESC(knock, "if set to 1 (default), the special '$3#33' command " \ - "must be used to enter the debugger; when set to 0, " \ - "hitting return key is enough to enter the debugger; " \ - "when set to -1, the debugger is entered immediately " \ - "upon NMI"); - -static char *kgdb_nmi_magic = "$3#33"; -module_param_named(magic, kgdb_nmi_magic, charp, 0600); -MODULE_PARM_DESC(magic, "magic sequence to enter NMI debugger (default $3#33)"); - static atomic_t kgdb_nmi_num_readers = ATOMIC_INIT(0); static int kgdb_nmi_console_setup(struct console *co, 
char *options) @@ -95,95 +83,6 @@ struct kgdb_nmi_tty_priv { static struct tty_port *kgdb_nmi_port; -static void kgdb_tty_recv(int ch) -{ - struct kgdb_nmi_tty_priv *priv; - char c = ch; - - if (!kgdb_nmi_port || ch < 0) - return; - /* - * Can't use port->tty->driver_data as tty might be not there. Timer - * will check for tty and will get the ref, but here we don't have to - * do that, and actually, we can't: we're in NMI context, no locks are - * possible. - */ - priv = container_of(kgdb_nmi_port, struct kgdb_nmi_tty_priv, port); - kfifo_in(&priv->fifo, &c, 1); -} - -static int kgdb_nmi_poll_one_knock(void) -{ - static int n; - int c; - const char *magic = kgdb_nmi_magic; - size_t m = strlen(magic); - bool printch = false; - - c = dbg_io_ops->read_char(); - if (c == NO_POLL_CHAR) - return c; - - if (!kgdb_nmi_knock && (c == '\r' || c == '\n')) { - return 1; - } else if (c == magic[n]) { - n = (n + 1) % m; - if (!n) - return 1; - printch = true; - } else { - n = 0; - } - - if (atomic_read(&kgdb_nmi_num_readers)) { - kgdb_tty_recv(c); - return 0; - } - - if (printch) { - kdb_printf("%c", c); - return 0; - } - - kdb_printf("\r%s %s to enter the debugger> %*s", - kgdb_nmi_knock ? "Type" : "Hit", - kgdb_nmi_knock ? magic : "<return>", (int)m, ""); - while (m--) - kdb_printf("\b"); - return 0; -} - -/** - * kgdb_nmi_poll_knock - Check if it is time to enter the debugger - * - * "Serial ports are often noisy, especially when muxed over another port (we - * often use serial over the headset connector). Noise on the async command - * line just causes characters that are ignored, on a command line that blocked - * execution noise would be catastrophic." -- Colin Cross - * - * So, this function implements KGDB/KDB knocking on the serial line: we won't - * enter the debugger until we receive a known magic phrase (which is actually - * "$3#33", known as "escape to KDB" command. There is also a relaxed variant - * of knocking, i.e. just pressing the return key is enough to enter the - * debugger. And if knocking is disabled, the function always returns 1. - */ -bool kgdb_nmi_poll_knock(void) -{ - if (kgdb_nmi_knock < 0) - return true; - - while (1) { - int ret; - - ret = kgdb_nmi_poll_one_knock(); - if (ret == NO_POLL_CHAR) - return false; - else if (ret == 1) - break; - } - return true; -} - /* * The tasklet is cheap, it does not cause wakeups when reschedules itself, * instead it waits for the next tick. 
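The kgdb_tty_recv() helper removed above pushed characters into a kfifo
precisely because NMI context cannot take locks: a kfifo needs no locking
as long as there is exactly one producer and one consumer. A minimal
sketch of that pattern (hypothetical names, not part of the patch):

    #include <linux/kfifo.h>
    #include <linux/printk.h>

    static DEFINE_KFIFO(nmi_rx_fifo, unsigned char, 64);

    /* NMI context: the single producer, no locks taken */
    static void nmi_rx_char(unsigned char c)
    {
            kfifo_in(&nmi_rx_fifo, &c, 1);
    }

    /* timer/tasklet context: the single consumer */
    static void nmi_rx_drain(void)
    {
            unsigned char c;

            while (kfifo_out(&nmi_rx_fifo, &c, 1))
                    pr_debug("received %c\n", c);
    }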
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index f55aa353aed9..2204cc3e3b07 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1621,7 +1621,7 @@ mpc52xx_console_setup(struct console *co, char *options)
 		 (void *)port->mapbase, port->membase,
 		 port->irq, port->uartclk);
 
-	/* Setup the port parameters accoding to options */
+	/* Setup the port parameters according to options */
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 	else
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index a3093e09309f..7b51cdc274fd 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -314,6 +314,7 @@
 #define SC16IS7XX_FIFO_SIZE		(64)
 #define SC16IS7XX_GPIOS_PER_BANK	4
+#define SC16IS7XX_POLL_PERIOD_MS	10
 
 #define SC16IS7XX_RECONF_MD		BIT(0)
 #define SC16IS7XX_RECONF_IER		BIT(1)
 #define SC16IS7XX_RECONF_RS485		BIT(2)
@@ -348,6 +349,8 @@ struct sc16is7xx_port {
 	u8			mctrl_mask;
 	struct kthread_worker	kworker;
 	struct task_struct	*kworker_task;
+	struct kthread_delayed_work	poll_work;
+	bool			polling;
 	struct sc16is7xx_one	p[];
 };
@@ -861,6 +864,18 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static void sc16is7xx_poll_proc(struct kthread_work *ws)
+{
+	struct sc16is7xx_port *s = container_of(ws, struct sc16is7xx_port, poll_work.work);
+
+	/* Reuse standard IRQ handler. Interrupt ID is unused in this context. */
+	sc16is7xx_irq(0, s);
+
+	/* Setup delay based on SC16IS7XX_POLL_PERIOD_MS */
+	kthread_queue_delayed_work(&s->kworker, &s->poll_work,
+				   msecs_to_jiffies(SC16IS7XX_POLL_PERIOD_MS));
+}
+
 static void sc16is7xx_tx_proc(struct kthread_work *ws)
 {
 	struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
@@ -1149,6 +1164,7 @@ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termi
 static int sc16is7xx_startup(struct uart_port *port)
 {
 	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
 	unsigned int val;
 	unsigned long flags;
@@ -1211,6 +1227,10 @@ static int sc16is7xx_startup(struct uart_port *port)
 	sc16is7xx_enable_ms(port);
 	uart_port_unlock_irqrestore(port, flags);
 
+	if (s->polling)
+		kthread_queue_delayed_work(&s->kworker, &s->poll_work,
+					   msecs_to_jiffies(SC16IS7XX_POLL_PERIOD_MS));
+
 	return 0;
 }
@@ -1232,6 +1252,9 @@ static void sc16is7xx_shutdown(struct uart_port *port)
 	sc16is7xx_power(port, 0);
 
+	if (s->polling)
+		kthread_cancel_delayed_work_sync(&s->poll_work);
+
 	kthread_flush_worker(&s->kworker);
 }
@@ -1538,6 +1561,11 @@ int sc16is7xx_probe(struct device *dev, const struct sc16is7xx_devtype *devtype,
 	/* Always ask for fixed clock rate from a property. */
 	device_property_read_u32(dev, "clock-frequency", &uartclk);
 
+	s->polling = !irq;
+	if (s->polling)
+		dev_dbg(dev,
+			"No interrupt pin definition, falling back to polling mode\n");
+
 	s->clk = devm_clk_get_optional(dev, NULL);
 	if (IS_ERR(s->clk))
 		return PTR_ERR(s->clk);
@@ -1665,6 +1693,12 @@ int sc16is7xx_probe(struct device *dev, const struct sc16is7xx_devtype *devtype,
 		goto out_ports;
 #endif
 
+	if (s->polling) {
+		/* Initialize kernel thread for polling */
+		kthread_init_delayed_work(&s->poll_work, sc16is7xx_poll_proc);
+		return 0;
+	}
+
 	/*
 	 * Setup interrupt. We first try to acquire the IRQ line as level IRQ.
 	 * If that succeeds, we can allow sharing the interrupt as well.
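 	 * (Polling setups took the early return above and never reach
 	 * this point.)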
@@ -1724,6 +1758,9 @@ void sc16is7xx_remove(struct device *dev) sc16is7xx_power(&s->p[i].port, 0); } + if (s->polling) + kthread_cancel_delayed_work_sync(&s->poll_work); + kthread_flush_worker(&s->kworker); kthread_stop(s->kworker_task); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 74fa02b23772..92f7e752f862 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -790,7 +790,6 @@ static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo) { struct uart_state *state = container_of(port, struct uart_state, port); struct uart_port *uport; - int ret = -ENODEV; /* Initialize structure in case we error out later to prevent any stack info leakage. */ *retinfo = (struct serial_struct){}; @@ -799,10 +798,10 @@ static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo) * Ensure the state we copy is consistent and no hardware changes * occur as we go */ - mutex_lock(&port->mutex); + guard(mutex)(&port->mutex); uport = uart_port_check(state); if (!uport) - goto out; + return -ENODEV; retinfo->type = uport->type; retinfo->line = uport->line; @@ -823,10 +822,7 @@ static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo) retinfo->iomem_reg_shift = uport->regshift; retinfo->iomem_base = (void *)(unsigned long)uport->mapbase; - ret = 0; -out: - mutex_unlock(&port->mutex); - return ret; + return 0; } static int uart_get_info_user(struct tty_struct *tty, @@ -838,6 +834,61 @@ static int uart_get_info_user(struct tty_struct *tty, return uart_get_info(port, ss) < 0 ? -EIO : 0; } +static int uart_change_port(struct uart_port *uport, + const struct serial_struct *new_info, + unsigned long new_port) +{ + unsigned long old_iobase, old_mapbase; + unsigned int old_type, old_iotype, old_hub6, old_shift; + int retval; + + old_iobase = uport->iobase; + old_mapbase = uport->mapbase; + old_type = uport->type; + old_hub6 = uport->hub6; + old_iotype = uport->iotype; + old_shift = uport->regshift; + + if (old_type != PORT_UNKNOWN && uport->ops->release_port) + uport->ops->release_port(uport); + + uport->iobase = new_port; + uport->type = new_info->type; + uport->hub6 = new_info->hub6; + uport->iotype = new_info->io_type; + uport->regshift = new_info->iomem_reg_shift; + uport->mapbase = (unsigned long)new_info->iomem_base; + + if (uport->type == PORT_UNKNOWN || !uport->ops->request_port) + return 0; + + retval = uport->ops->request_port(uport); + if (retval == 0) + return 0; /* succeeded => done */ + + /* + * If we fail to request resources for the new port, try to restore the + * old settings. + */ + uport->iobase = old_iobase; + uport->type = old_type; + uport->hub6 = old_hub6; + uport->iotype = old_iotype; + uport->regshift = old_shift; + uport->mapbase = old_mapbase; + + if (old_type == PORT_UNKNOWN) + return retval; + + retval = uport->ops->request_port(uport); + /* If we failed to restore the old settings, we fail like this. */ + if (retval) + uport->type = PORT_UNKNOWN; + + /* We failed anyway. 
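+ * Report -EBUSY for the new settings even when the old ones were
+ * restored successfully.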
*/ + return -EBUSY; +} + static int uart_set_info(struct tty_struct *tty, struct tty_port *port, struct uart_state *state, struct serial_struct *new_info) @@ -847,7 +898,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, unsigned int change_irq, change_port, closing_wait; unsigned int old_custom_divisor, close_delay; upf_t old_flags, new_flags; - int retval = 0; + int retval; if (!uport) return -EIO; @@ -886,13 +937,10 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, if (!(uport->flags & UPF_FIXED_PORT)) { unsigned int uartclk = new_info->baud_base * 16; /* check needs to be done here before other settings made */ - if (uartclk == 0) { - retval = -EINVAL; - goto exit; - } + if (uartclk == 0) + return -EINVAL; } if (!capable(CAP_SYS_ADMIN)) { - retval = -EPERM; if (change_irq || change_port || (new_info->baud_base != uport->uartclk / 16) || (close_delay != port->close_delay) || @@ -900,7 +948,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, (new_info->xmit_fifo_size && new_info->xmit_fifo_size != uport->fifosize) || (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0)) - goto exit; + return -EPERM; uport->flags = ((uport->flags & ~UPF_USR_MASK) | (new_flags & UPF_USR_MASK)); uport->custom_divisor = new_info->custom_divisor; @@ -910,30 +958,24 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, if (change_irq || change_port) { retval = security_locked_down(LOCKDOWN_TIOCSSERIAL); if (retval) - goto exit; + return retval; } - /* - * Ask the low level driver to verify the settings. - */ - if (uport->ops->verify_port) + /* Ask the low level driver to verify the settings. */ + if (uport->ops->verify_port) { retval = uport->ops->verify_port(uport, new_info); + if (retval) + return retval; + } if ((new_info->irq >= irq_get_nr_irqs()) || (new_info->irq < 0) || (new_info->baud_base < 9600)) - retval = -EINVAL; - - if (retval) - goto exit; + return -EINVAL; if (change_port || change_irq) { - retval = -EBUSY; - - /* - * Make sure that we are the sole user of this port. - */ + /* Make sure that we are the sole user of this port. */ if (tty_port_users(port) > 1) - goto exit; + return -EBUSY; /* * We need to shutdown the serial port at the old @@ -943,69 +985,9 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, } if (change_port) { - unsigned long old_iobase, old_mapbase; - unsigned int old_type, old_iotype, old_hub6, old_shift; - - old_iobase = uport->iobase; - old_mapbase = uport->mapbase; - old_type = uport->type; - old_hub6 = uport->hub6; - old_iotype = uport->iotype; - old_shift = uport->regshift; - - /* - * Free and release old regions - */ - if (old_type != PORT_UNKNOWN && uport->ops->release_port) - uport->ops->release_port(uport); - - uport->iobase = new_port; - uport->type = new_info->type; - uport->hub6 = new_info->hub6; - uport->iotype = new_info->io_type; - uport->regshift = new_info->iomem_reg_shift; - uport->mapbase = (unsigned long)new_info->iomem_base; - - /* - * Claim and map the new regions - */ - if (uport->type != PORT_UNKNOWN && uport->ops->request_port) { - retval = uport->ops->request_port(uport); - } else { - /* Always success - Jean II */ - retval = 0; - } - - /* - * If we fail to request resources for the - * new port, try to restore the old settings. 
- */ - if (retval) { - uport->iobase = old_iobase; - uport->type = old_type; - uport->hub6 = old_hub6; - uport->iotype = old_iotype; - uport->regshift = old_shift; - uport->mapbase = old_mapbase; - - if (old_type != PORT_UNKNOWN) { - retval = uport->ops->request_port(uport); - /* - * If we failed to restore the old settings, - * we fail like this. - */ - if (retval) - uport->type = PORT_UNKNOWN; - - /* - * We failed anyway. - */ - retval = -EBUSY; - } - - /* Added to return the correct error -Ram Gupta */ - goto exit; - } + retval = uart_change_port(uport, new_info, new_port); + if (retval) + return retval; } if (change_irq) @@ -1021,9 +1003,9 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, uport->fifosize = new_info->xmit_fifo_size; check_and_exit: - retval = 0; if (uport->type == PORT_UNKNOWN) - goto exit; + return 0; + if (tty_port_initialized(port)) { if (((old_flags ^ uport->flags) & UPF_SPD_MASK) || old_custom_divisor != uport->custom_divisor) { @@ -1039,15 +1021,17 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, } uart_change_line_settings(tty, state, NULL); } - } else { - retval = uart_startup(tty, state, true); - if (retval == 0) - tty_port_set_initialized(port, true); - if (retval > 0) - retval = 0; + + return 0; } - exit: - return retval; + + retval = uart_startup(tty, state, true); + if (retval < 0) + return retval; + if (retval == 0) + tty_port_set_initialized(port, true); + + return 0; } static int uart_set_info_user(struct tty_struct *tty, struct serial_struct *ss) @@ -2365,9 +2349,9 @@ struct uart_match { struct uart_driver *driver; }; -static int serial_match_port(struct device *dev, void *data) +static int serial_match_port(struct device *dev, const void *data) { - struct uart_match *match = data; + const struct uart_match *match = data; struct tty_driver *tty_drv = match->driver->tty_driver; dev_t devt = MKDEV(tty_drv->major, tty_drv->minor_start) + match->port->line; @@ -3061,26 +3045,25 @@ static ssize_t console_store(struct device *dev, if (ret) return ret; - mutex_lock(&port->mutex); + guard(mutex)(&port->mutex); uport = uart_port_check(state); - if (uport) { - oldconsole = uart_console_registered(uport); - if (oldconsole && !newconsole) { - ret = unregister_console(uport->cons); - } else if (!oldconsole && newconsole) { - if (uart_console(uport)) { - uport->console_reinit = 1; - register_console(uport->cons); - } else { - ret = -ENOENT; - } - } - } else { - ret = -ENXIO; + if (!uport) + return -ENXIO; + + oldconsole = uart_console_registered(uport); + if (oldconsole && !newconsole) { + ret = unregister_console(uport->cons); + if (ret < 0) + return ret; + } else if (!oldconsole && newconsole) { + if (!uart_console(uport)) + return -ENOENT; + + uport->console_reinit = 1; + register_console(uport->cons); } - mutex_unlock(&port->mutex); - return ret < 0 ? 
ret : count; + return count; } static DEVICE_ATTR_RO(uartclk); @@ -3136,7 +3119,6 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u { struct uart_state *state; struct tty_port *port; - int ret = 0; struct device *tty_dev; int num_groups; @@ -3146,11 +3128,9 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u state = drv->state + uport->line; port = &state->port; - mutex_lock(&port->mutex); - if (state->uart_port) { - ret = -EINVAL; - goto out; - } + guard(mutex)(&port->mutex); + if (state->uart_port) + return -EINVAL; /* Link the port to the driver state table and vice versa */ atomic_set(&state->refcount, 1); @@ -3170,10 +3150,8 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u uport->minor = drv->tty_driver->minor_start + uport->line; uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name, drv->tty_driver->name_base + uport->line); - if (!uport->name) { - ret = -ENOMEM; - goto out; - } + if (!uport->name) + return -ENOMEM; if (uport->cons && uport->dev) of_console_check(uport->dev->of_node, uport->cons->name, uport->line); @@ -3189,10 +3167,9 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u uport->tty_groups = kcalloc(num_groups, sizeof(*uport->tty_groups), GFP_KERNEL); - if (!uport->tty_groups) { - ret = -ENOMEM; - goto out; - } + if (!uport->tty_groups) + return -ENOMEM; + uport->tty_groups[0] = &tty_dev_attr_group; if (uport->attr_group) uport->tty_groups[1] = uport->attr_group; @@ -3215,10 +3192,7 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u uport->line); } - out: - mutex_unlock(&port->mutex); - - return ret; + return 0; } /** @@ -3384,7 +3358,7 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port) struct serial_ctrl_device *ctrl_dev, *new_ctrl_dev = NULL; int ret; - mutex_lock(&port_mutex); + guard(mutex)(&port_mutex); /* * Prevent serial_port_runtime_resume() from trying to use the port @@ -3396,10 +3370,8 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port) ctrl_dev = serial_core_ctrl_find(drv, port->dev, port->ctrl_id); if (!ctrl_dev) { new_ctrl_dev = serial_core_ctrl_device_add(port); - if (IS_ERR(new_ctrl_dev)) { - ret = PTR_ERR(new_ctrl_dev); - goto err_unlock; - } + if (IS_ERR(new_ctrl_dev)) + return PTR_ERR(new_ctrl_dev); ctrl_dev = new_ctrl_dev; } @@ -3420,8 +3392,6 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port) if (ret) goto err_unregister_port_dev; - mutex_unlock(&port_mutex); - return 0; err_unregister_port_dev: @@ -3430,9 +3400,6 @@ err_unregister_port_dev: err_unregister_ctrl_dev: serial_base_ctrl_device_remove(new_ctrl_dev); -err_unlock: - mutex_unlock(&port_mutex); - return ret; } diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 924b803af440..b1ea48f38248 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -165,6 +165,8 @@ struct sci_port { static struct sci_port sci_ports[SCI_NPORTS]; static unsigned long sci_ports_in_use; static struct uart_driver sci_uart_driver; +static bool sci_uart_earlycon; +static bool sci_uart_earlycon_dev_probing; static inline struct sci_port * to_sci_port(struct uart_port *uart) @@ -3056,10 +3058,6 @@ static int sci_init_single(struct platform_device *dev, ret = sci_init_clocks(sci_port, &dev->dev); if (ret < 0) return ret; - - port->dev = &dev->dev; - - pm_runtime_enable(&dev->dev); } port->type = p->type; @@ -3086,11 
+3084,6 @@ static int sci_init_single(struct platform_device *dev, return 0; } -static void sci_cleanup_single(struct sci_port *port) -{ - pm_runtime_disable(port->port.dev); -} - #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \ defined(CONFIG_SERIAL_SH_SCI_EARLYCON) static void serial_console_putchar(struct uart_port *port, unsigned char ch) @@ -3260,8 +3253,6 @@ static void sci_remove(struct platform_device *dev) sci_ports_in_use &= ~BIT(port->port.line); uart_remove_one_port(&sci_uart_driver, &port->port); - sci_cleanup_single(port); - if (port->port.fifosize > 1) device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger); if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) @@ -3396,7 +3387,8 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev, static int sci_probe_single(struct platform_device *dev, unsigned int index, struct plat_sci_port *p, - struct sci_port *sciport) + struct sci_port *sciport, + struct resource *sci_res) { int ret; @@ -3425,6 +3417,11 @@ static int sci_probe_single(struct platform_device *dev, if (ret) return ret; + sciport->port.dev = &dev->dev; + ret = devm_pm_runtime_enable(&dev->dev); + if (ret) + return ret; + sciport->gpios = mctrl_gpio_init(&sciport->port, 0); if (IS_ERR(sciport->gpios)) return PTR_ERR(sciport->gpios); @@ -3438,18 +3435,37 @@ static int sci_probe_single(struct platform_device *dev, sciport->port.flags |= UPF_HARD_FLOW; } - ret = uart_add_one_port(&sci_uart_driver, &sciport->port); - if (ret) { - sci_cleanup_single(sciport); - return ret; + if (sci_uart_earlycon && sci_ports[0].port.mapbase == sci_res->start) { + /* + * In case: + * - this is the earlycon port (mapped on index 0 in sci_ports[]) and + * - it now maps to an alias other than zero and + * - the earlycon is still alive (e.g., "earlycon keep_bootcon" is + * available in bootargs) + * + * we need to avoid disabling clocks and PM domains through the runtime + * PM APIs called in __device_attach(). For this, increment the runtime + * PM reference counter (the clocks and PM domains were already enabled + * by the bootloader). Otherwise the earlycon may access the HW when it + * has no clocks enabled, leading to failures (infinite loop in + * sci_poll_put_char()). + */ + pm_runtime_get_noresume(&dev->dev); + + /* + * Skip cleaning up sci_port[0] in early_console_exit(); this + * port is the same as the earlycon one. + */ + sci_uart_earlycon_dev_probing = true; } - return 0; + return uart_add_one_port(&sci_uart_driver, &sciport->port); } static int sci_probe(struct platform_device *dev) { struct plat_sci_port *p; + struct resource *res; struct sci_port *sp; unsigned int dev_id; int ret; @@ -3479,9 +3495,29 @@ static int sci_probe(struct platform_device *dev) } sp = &sci_ports[dev_id]; + + /* + * In case: + * - the probed port alias is zero (as the one used by earlycon), and + * - the earlycon is still active (e.g., "earlycon keep_bootcon" in + * bootargs) + * + * defer the probe of this serial device. This is a debug scenario and the user + * must be aware of it. + * + * Except when the probed port is the same as the earlycon port.
+ */ + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (sci_uart_earlycon && sp == &sci_ports[0] && sp->port.mapbase != res->start) + return dev_err_probe(&dev->dev, -EBUSY, "sci_port[0] is used by earlycon!\n"); + platform_set_drvdata(dev, sp); - ret = sci_probe_single(dev, dev_id, p, sp); + ret = sci_probe_single(dev, dev_id, p, sp, res); if (ret) return ret; @@ -3562,7 +3598,23 @@ sh_early_platform_init_buffer("earlyprintk", &sci_driver, early_serial_buf, ARRAY_SIZE(early_serial_buf)); #endif #ifdef CONFIG_SERIAL_SH_SCI_EARLYCON -static struct plat_sci_port port_cfg __initdata; +static struct plat_sci_port port_cfg; + +static int early_console_exit(struct console *co) +{ + struct sci_port *sci_port = &sci_ports[0]; + + /* + * Clean the slot used by earlycon. A new SCI device might + * map to this slot. + */ + if (!sci_uart_earlycon_dev_probing) { + memset(sci_port, 0, sizeof(*sci_port)); + sci_uart_earlycon = false; + } + + return 0; +} static int __init early_console_setup(struct earlycon_device *device, int type) @@ -3571,15 +3623,18 @@ static int __init early_console_setup(struct earlycon_device *device, return -ENODEV; device->port.type = type; - memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port)); + sci_ports[0].port = device->port; port_cfg.type = type; sci_ports[0].cfg = &port_cfg; sci_ports[0].params = sci_probe_regmap(&port_cfg); + sci_uart_earlycon = true; port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR); sci_serial_out(&sci_ports[0].port, SCSCR, SCSCR_RE | SCSCR_TE | port_cfg.scscr); device->con->write = serial_console_write; + device->con->exit = early_console_exit; + return 0; } static int __init sci_early_console_setup(struct earlycon_device *device, diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index beb151be4d32..92ec51870d1d 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -287,7 +287,7 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus) continue; } - if (uart_handle_sysrq_char(port, data)) + if (uart_prepare_sysrq_char(port, data)) continue; if (is_rxbs_support) { @@ -495,7 +495,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id) !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS)) cdns_uart_handle_rx(dev_id, isrstatus); - uart_port_unlock(port); + uart_unlock_and_check_sysrq(port); return IRQ_HANDLED; } @@ -1380,9 +1380,7 @@ static void cdns_uart_console_write(struct console *co, const char *s, unsigned int imr, ctrl; int locked = 1; - if (port->sysrq) - locked = 0; - else if (oops_in_progress) + if (oops_in_progress) locked = uart_port_trylock_irqsave(port, &flags); else uart_port_lock_irqsave(port, &flags); diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index 564341f1a74f..0bd6544e30a6 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c @@ -192,6 +192,20 @@ int set_selection_user(const struct tiocl_selection __user *sel, if (copy_from_user(&v, sel, sizeof(*sel))) return -EFAULT; + /* + * TIOCL_SELCLEAR, TIOCL_SELPOINTER and TIOCL_SELMOUSEREPORT are OK to + * use without CAP_SYS_ADMIN as they do not modify the selection. 
+ */ + switch (v.sel_mode) { + case TIOCL_SELCLEAR: + case TIOCL_SELPOINTER: + case TIOCL_SELMOUSEREPORT: + break; + default: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + } + return set_selection_kernel(&v, tty); } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 96842ce817af..be5564ed8c01 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -3345,8 +3345,6 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) switch (type) { case TIOCL_SETSEL: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; return set_selection_user(param, tty); case TIOCL_PASTESEL: if (!capable(CAP_SYS_ADMIN)) diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 4f0c1b96e208..1f5ef174abea 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -232,16 +232,7 @@ static struct xhci_file_map ring_files[] = { static int xhci_ring_open(struct inode *inode, struct file *file) { - int i; - struct xhci_file_map *f_map; - const char *file_name = file_dentry(file)->d_iname; - - for (i = 0; i < ARRAY_SIZE(ring_files); i++) { - f_map = &ring_files[i]; - - if (strcmp(f_map->name, file_name) == 0) - break; - } + const struct xhci_file_map *f_map = debugfs_get_aux(file); return single_open(file, f_map->show, inode->i_private); } @@ -318,16 +309,7 @@ static struct xhci_file_map context_files[] = { static int xhci_context_open(struct inode *inode, struct file *file) { - int i; - struct xhci_file_map *f_map; - const char *file_name = file_dentry(file)->d_iname; - - for (i = 0; i < ARRAY_SIZE(context_files); i++) { - f_map = &context_files[i]; - - if (strcmp(f_map->name, file_name) == 0) - break; - } + const struct xhci_file_map *f_map = debugfs_get_aux(file); return single_open(file, f_map->show, inode->i_private); } @@ -410,7 +392,8 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci, int i; for (i = 0; i < nentries; i++) - debugfs_create_file(files[i].name, 0444, parent, data, fops); + debugfs_create_file_aux(files[i].name, 0444, parent, + data, &files[i], fops); } static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci, diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c index 9bd74c505872..c003049bafbf 100644 --- a/drivers/usb/mtu3/mtu3_debugfs.c +++ b/drivers/usb/mtu3/mtu3_debugfs.c @@ -257,16 +257,7 @@ static const struct mtu3_file_map mtu3_ep_files[] = { static int mtu3_ep_open(struct inode *inode, struct file *file) { - const char *file_name = file_dentry(file)->d_iname; - const struct mtu3_file_map *f_map; - int i; - - for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) { - f_map = &mtu3_ep_files[i]; - - if (strcmp(f_map->name, file_name) == 0) - break; - } + const struct mtu3_file_map *f_map = debugfs_get_aux(file); return single_open(file, f_map->show, inode->i_private); } @@ -289,17 +280,8 @@ static const struct debugfs_reg32 mtu3_prb_regs[] = { static int mtu3_probe_show(struct seq_file *sf, void *unused) { - const char *file_name = file_dentry(sf->file)->d_iname; struct mtu3 *mtu = sf->private; - const struct debugfs_reg32 *regs; - int i; - - for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) { - regs = &mtu3_prb_regs[i]; - - if (strcmp(regs->name, file_name) == 0) - break; - } + const struct debugfs_reg32 *regs = debugfs_get_aux(sf->file); seq_printf(sf, "0x%04x - 0x%08x\n", (u32)regs->offset, mtu3_readl(mtu->ippc_base, (u32)regs->offset)); @@ -315,13 +297,11 @@ static int mtu3_probe_open(struct inode *inode, struct file *file) static ssize_t mtu3_probe_write(struct file *file, const 
char __user *ubuf, size_t count, loff_t *ppos) { - const char *file_name = file_dentry(file)->d_iname; struct seq_file *sf = file->private_data; struct mtu3 *mtu = sf->private; - const struct debugfs_reg32 *regs; + const struct debugfs_reg32 *regs = debugfs_get_aux(file); char buf[32]; u32 val; - int i; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -329,12 +309,6 @@ static ssize_t mtu3_probe_write(struct file *file, const char __user *ubuf, if (kstrtou32(buf, 0, &val)) return -EINVAL; - for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) { - regs = &mtu3_prb_regs[i]; - - if (strcmp(regs->name, file_name) == 0) - break; - } mtu3_writel(mtu->ippc_base, (u32)regs->offset, val); return count; @@ -359,8 +333,8 @@ static void mtu3_debugfs_create_prb_files(struct mtu3 *mtu) for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) { regs = &mtu3_prb_regs[i]; - debugfs_create_file(regs->name, 0644, dir_prb, - mtu, &mtu3_probe_fops); + debugfs_create_file_aux(regs->name, 0644, dir_prb, + mtu, regs, &mtu3_probe_fops); } mtu3_debugfs_regset(mtu, mtu->ippc_base, mtu3_prb_regs, @@ -380,8 +354,8 @@ static void mtu3_debugfs_create_ep_dir(struct mtu3_ep *mep, for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) { files = &mtu3_ep_files[i]; - debugfs_create_file(files->name, 0444, dir_ep, - mep, &mtu3_ep_fops); + debugfs_create_file_aux(files->name, 0444, dir_ep, + mep, files, &mtu3_ep_fops); } } diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index a137e105cc55..9c76c3d0c6cf 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -230,10 +230,10 @@ static const char * const usb_modes[] = { /* ------------------------------------------------------------------------- */ /* Alternate Modes */ -static int altmode_match(struct device *dev, void *data) +static int altmode_match(struct device *dev, const void *data) { struct typec_altmode *adev = to_typec_altmode(dev); - struct typec_device_id *id = data; + const struct typec_device_id *id = data; if (!is_typec_altmode(dev)) return 0; @@ -1284,11 +1284,6 @@ const struct device_type typec_cable_dev_type = { .release = typec_cable_release, }; -static int cable_match(struct device *dev, void *data) -{ - return is_typec_cable(dev); -} - /** * typec_cable_get - Get a reference to the USB Type-C cable * @port: The USB Type-C Port the cable is connected to @@ -1300,7 +1295,8 @@ struct typec_cable *typec_cable_get(struct typec_port *port) { struct device *dev; - dev = device_find_child(&port->dev, NULL, cable_match); + dev = device_find_child(&port->dev, &typec_cable_dev_type, + device_match_type); if (!dev) return NULL; @@ -2030,16 +2026,12 @@ const struct device_type typec_port_dev_type = { /* --------------------------------------- */ /* Driver callbacks to report role updates */ -static int partner_match(struct device *dev, void *data) -{ - return is_typec_partner(dev); -} - static struct typec_partner *typec_get_partner(struct typec_port *port) { struct device *dev; - dev = device_find_child(&port->dev, NULL, partner_match); + dev = device_find_child(&port->dev, &typec_partner_dev_type, + device_match_type); if (!dev) return NULL; @@ -2172,7 +2164,9 @@ void typec_set_pwr_opmode(struct typec_port *port, sysfs_notify(&port->dev.kobj, NULL, "power_operation_mode"); kobject_uevent(&port->dev.kobj, KOBJ_CHANGE); - partner_dev = device_find_child(&port->dev, NULL, partner_match); + partner_dev = device_find_child(&port->dev, + &typec_partner_dev_type, + device_match_type); if (partner_dev) { struct 
typec_partner *partner = to_typec_partner(partner_dev); @@ -2336,7 +2330,9 @@ int typec_get_negotiated_svdm_version(struct typec_port *port) enum usb_pd_svdm_ver svdm_version; struct device *partner_dev; - partner_dev = device_find_child(&port->dev, NULL, partner_match); + partner_dev = device_find_child(&port->dev, + &typec_partner_dev_type, + device_match_type); if (!partner_dev) return -ENODEV; @@ -2363,7 +2359,8 @@ int typec_get_cable_svdm_version(struct typec_port *port) enum usb_pd_svdm_ver svdm_version; struct device *cable_dev; - cable_dev = device_find_child(&port->dev, NULL, cable_match); + cable_dev = device_find_child(&port->dev, &typec_cable_dev_type, + device_match_type); if (!cable_dev) return -ENODEV; diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index ed4737de4528..f2e686f8f1ef 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -76,7 +76,7 @@ int mdev_register_parent(struct mdev_parent *parent, struct device *dev, if (ret) return ret; - ret = class_compat_create_link(mdev_bus_compat_class, dev, NULL); + ret = class_compat_create_link(mdev_bus_compat_class, dev); if (ret) dev_warn(dev, "Failed to create compatibility class link\n"); @@ -98,7 +98,7 @@ void mdev_unregister_parent(struct mdev_parent *parent) dev_info(parent->dev, "MDEV: Unregistering\n"); down_write(&parent->unreg_sem); - class_compat_remove_link(mdev_bus_compat_class, parent->dev, NULL); + class_compat_remove_link(mdev_bus_compat_class, parent->dev); device_for_each_child(parent->dev, NULL, mdev_device_remove_cb); parent_remove_sysfs_files(parent); up_write(&parent->unreg_sem); diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c index a467085038f0..e5ac39c4cc6b 100644 --- a/drivers/vfio/pci/nvgrace-gpu/main.c +++ b/drivers/vfio/pci/nvgrace-gpu/main.c @@ -5,6 +5,8 @@ #include <linux/sizes.h> #include <linux/vfio_pci_core.h> +#include <linux/delay.h> +#include <linux/jiffies.h> /* * The device memory usable to the workloads running in the VM is cached @@ -17,12 +19,21 @@ #define RESMEM_REGION_INDEX VFIO_PCI_BAR2_REGION_INDEX #define USEMEM_REGION_INDEX VFIO_PCI_BAR4_REGION_INDEX -/* Memory size expected as non cached and reserved by the VM driver */ -#define RESMEM_SIZE SZ_1G - /* A hardwired and constant ABI value between the GPU FW and VFIO driver. */ #define MEMBLK_SIZE SZ_512M +#define DVSEC_BITMAP_OFFSET 0xA +#define MIG_SUPPORTED_WITH_CACHED_RESMEM BIT(0) + +#define GPU_CAP_DVSEC_REGISTER 3 + +#define C2C_LINK_BAR0_OFFSET 0x1498 +#define HBM_TRAINING_BAR0_OFFSET 0x200BC +#define STATUS_READY 0xFF + +#define POLL_QUANTUM_MS 1000 +#define POLL_TIMEOUT_MS (30 * 1000) + /* * The state of the two device memory region - resmem and usemem - is * saved as struct mem_region. @@ -46,6 +57,7 @@ struct nvgrace_gpu_pci_core_device { struct mem_region resmem; /* Lock to control device memory kernel mapping */ struct mutex remap_lock; + bool has_mig_hw_bug; }; static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev) @@ -66,7 +78,7 @@ nvgrace_gpu_memregion(int index, if (index == USEMEM_REGION_INDEX) return &nvdev->usemem; - if (index == RESMEM_REGION_INDEX) + if (nvdev->resmem.memlength && index == RESMEM_REGION_INDEX) return &nvdev->resmem; return NULL; @@ -751,40 +763,67 @@ nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev, u64 memphys, u64 memlength) { int ret = 0; + u64 resmem_size = 0; /* - * The VM GPU device driver needs a non-cacheable region to support - * the MIG feature. 
Since the device memory is mapped as NORMAL cached, - * carve out a region from the end with a different NORMAL_NC - * property (called as reserved memory and represented as resmem). This - * region then is exposed as a 64b BAR (region 2 and 3) to the VM, while - * exposing the rest (termed as usable memory and represented using usemem) - * as cacheable 64b BAR (region 4 and 5). + * On Grace Hopper systems, the VM GPU device driver needs a non-cacheable + * region to support the MIG feature owing to a hardware bug. Since the + * device memory is mapped as NORMAL cached, carve out a region from the end + * with a different NORMAL_NC property (called reserved memory and + * represented as resmem). This region then is exposed as a 64b BAR + * (region 2 and 3) to the VM, while exposing the rest (termed as usable + * memory and represented using usemem) as cacheable 64b BAR (region 4 and 5). * * devmem (memlength) * |-------------------------------------------------| * | | * usemem.memphys resmem.memphys + * + * This hardware bug is fixed on the Grace Blackwell platforms and the + * presence of the bug can be determined through nvdev->has_mig_hw_bug. + * Thus on systems with the hardware fix, there is no need to partition + * the GPU device memory and the entire memory is usable and mapped as + * NORMAL cached (i.e. resmem size is 0). */ + if (nvdev->has_mig_hw_bug) + resmem_size = SZ_1G; + nvdev->usemem.memphys = memphys; /* * The device memory exposed to the VM is added to the kernel by the - * VM driver module in chunks of memory block size. Only the usable - * memory (usemem) is added to the kernel for usage by the VM - * workloads. Make the usable memory size memblock aligned. + * VM driver module in chunks of memory block size. Note that only the + * usable memory (usemem) is added to the kernel for usage by the VM + * workloads. */ - if (check_sub_overflow(memlength, RESMEM_SIZE, + if (check_sub_overflow(memlength, resmem_size, &nvdev->usemem.memlength)) { ret = -EOVERFLOW; goto done; } /* - * The USEMEM part of the device memory has to be MEMBLK_SIZE - * aligned. This is a hardwired ABI value between the GPU FW and - * VFIO driver. The VM device driver is also aware of it and make - * use of the value for its calculation to determine USEMEM size. + * The usemem region is exposed as a 64b BAR composed of region 4 and 5. + * Calculate and save the BAR size for the region. + */ + nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength); + + /* + * If the hardware has the fix for MIG, there is no requirement + * for splitting the device memory to create RESMEM. The entire + * device memory is usable and will be USEMEM. Return here in + * that case. + */ + if (!nvdev->has_mig_hw_bug) + goto done; + + /* + * When the device memory is split to work around the MIG bug on + * Grace Hopper, the USEMEM part of the device memory has to be + * MEMBLK_SIZE aligned. This is a hardwired ABI value between the + * GPU FW and VFIO driver. The VM device driver is also aware of it + * and makes use of the value in its calculation to determine USEMEM + * size. Note that the device memory may not be 512M aligned. */ nvdev->usemem.memlength = round_down(nvdev->usemem.memlength, MEMBLK_SIZE); @@ -803,15 +842,93 @@ nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev, } /* - * The memory regions are exposed as BARs. Calculate and save - * the BAR size for them. + * The resmem region is exposed as a 64b BAR composed of region 2 and 3 + * for Grace Hopper.
Calculate and save the BAR size for the region. */ - nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength); nvdev->resmem.bar_size = roundup_pow_of_two(nvdev->resmem.memlength); done: return ret; } +static bool nvgrace_gpu_has_mig_hw_bug(struct pci_dev *pdev) +{ + int pcie_dvsec; + u16 dvsec_ctrl16; + + pcie_dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_NVIDIA, + GPU_CAP_DVSEC_REGISTER); + + if (pcie_dvsec) { + pci_read_config_word(pdev, + pcie_dvsec + DVSEC_BITMAP_OFFSET, + &dvsec_ctrl16); + + if (dvsec_ctrl16 & MIG_SUPPORTED_WITH_CACHED_RESMEM) + return false; + } + + return true; +} + +/* + * To reduce the system bootup time, the HBM training has + * been moved out of the UEFI on the Grace-Blackwell systems. + * + * The onus of checking whether the HBM training has completed + * thus falls on the module. The HBM training status can be + * determined from a BAR0 register. + * + * Similarly, another BAR0 register exposes the status of the + * CPU-GPU chip-to-chip (C2C) cache coherent interconnect. + * + * Poll these registers for up to 30s. If the HBM training is + * not complete or if the C2C link is not ready, fail the probe. + * + * While the wait is not required on Grace Hopper systems, it + * is beneficial to make the check to ensure the device is in an + * expected state. + * + * Ensure that the BAR0 region is enabled before accessing the + * registers. + */ +static int nvgrace_gpu_wait_device_ready(struct pci_dev *pdev) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(POLL_TIMEOUT_MS); + void __iomem *io; + int ret = -ETIME; + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + ret = pci_request_selected_regions(pdev, 1 << 0, KBUILD_MODNAME); + if (ret) + goto request_region_exit; + + io = pci_iomap(pdev, 0, 0); + if (!io) { + ret = -ENOMEM; + goto iomap_exit; + } + + do { + if ((ioread32(io + C2C_LINK_BAR0_OFFSET) == STATUS_READY) && + (ioread32(io + HBM_TRAINING_BAR0_OFFSET) == STATUS_READY)) { + ret = 0; + goto reg_check_exit; + } + msleep(POLL_QUANTUM_MS); + } while (!time_after(jiffies, timeout)); + +reg_check_exit: + pci_iounmap(pdev, io); +iomap_exit: + pci_release_selected_regions(pdev, 1 << 0); +request_region_exit: + pci_disable_device(pdev); + return ret; +} + static int nvgrace_gpu_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -820,6 +937,10 @@ static int nvgrace_gpu_probe(struct pci_dev *pdev, u64 memphys, memlength; int ret; + ret = nvgrace_gpu_wait_device_ready(pdev); + if (ret) + return ret; + ret = nvgrace_gpu_fetch_memory_property(pdev, &memphys, &memlength); if (!ret) ops = &nvgrace_gpu_pci_ops; @@ -832,6 +953,8 @@ static int nvgrace_gpu_probe(struct pci_dev *pdev, dev_set_drvdata(&pdev->dev, &nvdev->core_device); if (ops == &nvgrace_gpu_pci_ops) { + nvdev->has_mig_hw_bug = nvgrace_gpu_has_mig_hw_bug(pdev); + /* * Device memory properties are identified in the host ACPI * table. Set the nvgrace_gpu_pci_core_device structure.
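
A short userspace C sketch of the usemem/resmem carving arithmetic described in the comments above, using the SZ_1G carve-out and the SZ_512M memblock ABI constants from this patch. The memlength value is hypothetical (deliberately not 512M-aligned), and the assumption that everything left after aligning usemem down becomes resmem follows the comment text, not code shown here:

	#include <stdio.h>

	#define SZ_512M (512ULL << 20)	/* MEMBLK_SIZE: GPU FW <-> VFIO ABI */
	#define SZ_1G (1024ULL << 20)	/* carve-out when the MIG HW bug is present */

	static unsigned long long round_down_512m(unsigned long long v)
	{
		return v & ~(SZ_512M - 1);
	}

	static unsigned long long pow2_roundup(unsigned long long v)
	{
		unsigned long long p = 1;

		while (p < v)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		unsigned long long memlength = 98048ULL << 20;	/* 95.75 GiB of device memory */
		unsigned long long usemem, resmem;

		usemem = round_down_512m(memlength - SZ_1G);	/* cacheable part, 512M-aligned */
		resmem = memlength - usemem;			/* NORMAL_NC remainder at the end */

		printf("usemem %lluM, BAR %lluM\n", usemem >> 20, pow2_roundup(usemem) >> 20);
		printf("resmem %lluM, BAR %lluM\n", resmem >> 20, pow2_roundup(resmem) >> 20);
		return 0;
	}

This prints usemem 96768M with a 131072M BAR and resmem 1280M with a 2048M BAR, mirroring the round_down() and roundup_pow_of_two() calls in nvgrace_gpu_init_nvdev_struct().
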
@@ -868,6 +991,8 @@ static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = { { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) }, /* GH200 SKU */ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2348) }, + /* GB200 SKU */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2941) }, {} }; diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 5572fd99b921..94142581c98c 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -511,13 +511,13 @@ static void vfio_bar_fixup(struct vfio_pci_core_device *vdev) mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1); mask |= PCI_ROM_ADDRESS_ENABLE; *vbar &= cpu_to_le32((u32)mask); - } else if (pdev->resource[PCI_ROM_RESOURCE].flags & - IORESOURCE_ROM_SHADOW) { - mask = ~(0x20000 - 1); + } else if (pdev->rom && pdev->romlen) { + mask = ~(roundup_pow_of_two(pdev->romlen) - 1); mask |= PCI_ROM_ADDRESS_ENABLE; *vbar &= cpu_to_le32((u32)mask); - } else + } else { *vbar = 0; + } vdev->bardirty = false; } diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index 1a4ed5a357d3..586e49efb81b 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1054,31 +1054,27 @@ static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev, info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.flags = 0; + info.size = 0; - /* Report the BAR size, not the ROM size */ - info.size = pci_resource_len(pdev, info.index); - if (!info.size) { - /* Shadow ROMs appear as PCI option ROMs */ - if (pdev->resource[PCI_ROM_RESOURCE].flags & - IORESOURCE_ROM_SHADOW) - info.size = 0x20000; - else - break; - } - - /* - * Is it really there? Enable memory decode for implicit access - * in pci_map_rom(). - */ - cmd = vfio_pci_memory_lock_and_enable(vdev); - io = pci_map_rom(pdev, &size); - if (io) { + if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) { + /* + * Check ROM content is valid. Need to enable memory + * decode for ROM access in pci_map_rom(). + */ + cmd = vfio_pci_memory_lock_and_enable(vdev); + io = pci_map_rom(pdev, &size); + if (io) { + info.flags = VFIO_REGION_INFO_FLAG_READ; + /* Report the BAR size, not the ROM size. */ + info.size = pci_resource_len(pdev, PCI_ROM_RESOURCE); + pci_unmap_rom(pdev, io); + } + vfio_pci_memory_unlock_and_restore(vdev, cmd); + } else if (pdev->rom && pdev->romlen) { info.flags = VFIO_REGION_INFO_FLAG_READ; - pci_unmap_rom(pdev, io); - } else { - info.size = 0; + /* Report BAR size as power of two. 
*/ + info.size = roundup_pow_of_two(pdev->romlen); } - vfio_pci_memory_unlock_and_restore(vdev, cmd); break; } diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 66b72c289284..6192788c8ba3 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -16,6 +16,7 @@ #include <linux/io.h> #include <linux/vfio.h> #include <linux/vgaarb.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include "vfio_pci_priv.h" @@ -61,9 +62,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_iowrite##size); VFIO_IOWRITE(8) VFIO_IOWRITE(16) VFIO_IOWRITE(32) -#ifdef iowrite64 VFIO_IOWRITE(64) -#endif #define VFIO_IOREAD(size) \ int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev, \ @@ -89,9 +88,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_ioread##size); VFIO_IOREAD(8) VFIO_IOREAD(16) VFIO_IOREAD(32) -#ifdef ioread64 VFIO_IOREAD(64) -#endif #define VFIO_IORDWR(size) \ static int vfio_pci_iordwr##size(struct vfio_pci_core_device *vdev,\ @@ -127,9 +124,7 @@ static int vfio_pci_iordwr##size(struct vfio_pci_core_device *vdev,\ VFIO_IORDWR(8) VFIO_IORDWR(16) VFIO_IORDWR(32) -#if defined(ioread64) && defined(iowrite64) VFIO_IORDWR(64) -#endif /* * Read or write from an __iomem region (MMIO or I/O port) with an excluded @@ -155,7 +150,6 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, else fillable = 0; -#if defined(ioread64) && defined(iowrite64) if (fillable >= 8 && !(off % 8)) { ret = vfio_pci_iordwr64(vdev, iswrite, test_mem, io, buf, off, &filled); @@ -163,7 +157,6 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, return ret; } else -#endif if (fillable >= 4 && !(off % 4)) { ret = vfio_pci_iordwr32(vdev, iswrite, test_mem, io, buf, off, &filled); @@ -244,9 +237,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, if (pci_resource_start(pdev, bar)) end = pci_resource_len(pdev, bar); - else if (bar == PCI_ROM_RESOURCE && - pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW) - end = 0x20000; + else if (bar == PCI_ROM_RESOURCE && pdev->rom && pdev->romlen) + end = roundup_pow_of_two(pdev->romlen); else return -EINVAL; @@ -261,11 +253,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, * excluded range at the end of the actual ROM. This makes * filling large ROM BARs much faster. 
*/ - io = pci_map_rom(pdev, &x_start); - if (!io) { - done = -ENOMEM; - goto out; + if (pci_resource_start(pdev, bar)) { + io = pci_map_rom(pdev, &x_start); + } else { + io = ioremap(pdev->rom, pdev->romlen); + x_start = pdev->romlen; } + if (!io) + return -ENOMEM; x_end = end; } else { int ret = vfio_pci_core_setup_barmap(vdev, bar); @@ -288,8 +283,13 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, if (done >= 0) *ppos += done; - if (bar == PCI_ROM_RESOURCE) - pci_unmap_rom(pdev, io); + if (bar == PCI_ROM_RESOURCE) { + if (pci_resource_start(pdev, bar)) + pci_unmap_rom(pdev, io); + else + iounmap(io); + } + out: return done; } @@ -381,12 +381,10 @@ static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd, vfio_pci_core_iowrite32(ioeventfd->vdev, test_mem, ioeventfd->data, ioeventfd->addr); break; -#ifdef iowrite64 case 8: vfio_pci_core_iowrite64(ioeventfd->vdev, test_mem, ioeventfd->data, ioeventfd->addr); break; -#endif } } @@ -440,10 +438,8 @@ int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, pos >= vdev->msix_offset + vdev->msix_size)) return -EINVAL; -#ifndef iowrite64 if (count == 8) return -EINVAL; -#endif ret = vfio_pci_core_setup_barmap(vdev, bar); if (ret) diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index e53757d1d095..3bf1043cd795 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -388,6 +388,11 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg, { unsigned int done = 0; + if (off >= reg->size) + return -EINVAL; + + count = min_t(size_t, count, reg->size - off); + if (!reg->ioaddr) { reg->ioaddr = ioremap(reg->addr, reg->size); @@ -467,6 +472,11 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg, { unsigned int done = 0; + if (off >= reg->size) + return -EINVAL; + + count = min_t(size_t, count, reg->size - off); + if (!reg->ioaddr) { reg->ioaddr = ioremap(reg->addr, reg->size); diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 16e198a26339..e33cc77699cd 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -47,11 +47,17 @@ const struct file_operations debugfs_noop_file_operations = { #define F_DENTRY(filp) ((filp)->f_path.dentry) +const void *debugfs_get_aux(const struct file *file) +{ + return DEBUGFS_I(file_inode(file))->aux; +} +EXPORT_SYMBOL_GPL(debugfs_get_aux); + const struct file_operations *debugfs_real_fops(const struct file *filp) { struct debugfs_fsdata *fsd = F_DENTRY(filp)->d_fsdata; - if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) { + if (!fsd) { /* * Urgh, we've been called w/o a protecting * debugfs_file_get(). 
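
The debugfs_get_aux() accessor added here, together with the debugfs_create_file_aux() creation call used by the xhci and mtu3 conversions earlier in this series, replaces the old pattern of strcmp()-matching file_dentry(file)->d_iname against a table. A minimal sketch of the new pattern; the my_* names are illustrative, not part of any driver:

	#include <linux/debugfs.h>
	#include <linux/module.h>
	#include <linux/seq_file.h>

	struct my_file_map {
		const char *name;
		int (*show)(struct seq_file *sf, void *unused);
	};

	static int my_state_show(struct seq_file *sf, void *unused)
	{
		seq_puts(sf, "ok\n");	/* placeholder content */
		return 0;
	}

	static struct my_file_map my_files[] = {
		{ .name = "state", .show = my_state_show },
	};

	static int my_open(struct inode *inode, struct file *file)
	{
		/* the aux pointer stored at creation time comes straight back */
		const struct my_file_map *f_map = debugfs_get_aux(file);

		return single_open(file, f_map->show, inode->i_private);
	}

	static const struct file_operations my_fops = {
		.owner = THIS_MODULE,
		.open = my_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
	};

	static void my_create_files(struct dentry *parent, void *data)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(my_files); i++)
			debugfs_create_file_aux(my_files[i].name, 0444, parent,
						data, &my_files[i], &my_fops);
	}
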
@@ -84,9 +90,11 @@ static int __debugfs_file_get(struct dentry *dentry, enum dbgfs_get_mode mode) return -EINVAL; d_fsd = READ_ONCE(dentry->d_fsdata); - if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) { + if (d_fsd) { fsd = d_fsd; } else { + struct inode *inode = dentry->d_inode; + if (WARN_ON(mode == DBGFS_GET_ALREADY)) return -EINVAL; @@ -95,23 +103,38 @@ static int __debugfs_file_get(struct dentry *dentry, enum dbgfs_get_mode mode) return -ENOMEM; if (mode == DBGFS_GET_SHORT) { - fsd->real_fops = NULL; - fsd->short_fops = (void *)((unsigned long)d_fsd & - ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT); + const struct debugfs_short_fops *ops; + ops = fsd->short_fops = DEBUGFS_I(inode)->short_fops; + if (ops->llseek) + fsd->methods |= HAS_LSEEK; + if (ops->read) + fsd->methods |= HAS_READ; + if (ops->write) + fsd->methods |= HAS_WRITE; } else { - fsd->real_fops = (void *)((unsigned long)d_fsd & - ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT); - fsd->short_fops = NULL; + const struct file_operations *ops; + ops = fsd->real_fops = DEBUGFS_I(inode)->real_fops; + if (ops->llseek) + fsd->methods |= HAS_LSEEK; + if (ops->read) + fsd->methods |= HAS_READ; + if (ops->write) + fsd->methods |= HAS_WRITE; + if (ops->unlocked_ioctl) + fsd->methods |= HAS_IOCTL; + if (ops->poll) + fsd->methods |= HAS_POLL; } refcount_set(&fsd->active_users, 1); init_completion(&fsd->active_users_drained); INIT_LIST_HEAD(&fsd->cancellations); mutex_init(&fsd->cancellations_mtx); - if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) { + d_fsd = cmpxchg(&dentry->d_fsdata, NULL, fsd); + if (d_fsd) { mutex_destroy(&fsd->cancellations_mtx); kfree(fsd); - fsd = READ_ONCE(dentry->d_fsdata); + fsd = d_fsd; } } @@ -208,8 +231,7 @@ void debugfs_enter_cancellation(struct file *file, return; fsd = READ_ONCE(dentry->d_fsdata); - if (WARN_ON(!fsd || - ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))) + if (WARN_ON(!fsd)) return; mutex_lock(&fsd->cancellations_mtx); @@ -240,8 +262,7 @@ void debugfs_leave_cancellation(struct file *file, return; fsd = READ_ONCE(dentry->d_fsdata); - if (WARN_ON(!fsd || - ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))) + if (WARN_ON(!fsd)) return; mutex_lock(&fsd->cancellations_mtx); @@ -322,13 +343,16 @@ const struct file_operations debugfs_open_proxy_file_operations = { #define PROTO(args...) args #define ARGS(args...) 
args -#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args) \ +#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args, bit, ret) \ static ret_type full_proxy_ ## name(proto) \ { \ - struct dentry *dentry = F_DENTRY(filp); \ + struct dentry *dentry = F_DENTRY(filp); \ + struct debugfs_fsdata *fsd = dentry->d_fsdata; \ const struct file_operations *real_fops; \ ret_type r; \ \ + if (!(fsd->methods & bit)) \ + return ret; \ r = debugfs_file_get(dentry); \ if (unlikely(r)) \ return r; \ @@ -338,17 +362,18 @@ static ret_type full_proxy_ ## name(proto) \ return r; \ } -#define FULL_PROXY_FUNC_BOTH(name, ret_type, filp, proto, args) \ +#define FULL_PROXY_FUNC_BOTH(name, ret_type, filp, proto, args, bit, ret) \ static ret_type full_proxy_ ## name(proto) \ { \ struct dentry *dentry = F_DENTRY(filp); \ - struct debugfs_fsdata *fsd; \ + struct debugfs_fsdata *fsd = dentry->d_fsdata; \ ret_type r; \ \ + if (!(fsd->methods & bit)) \ + return ret; \ r = debugfs_file_get(dentry); \ if (unlikely(r)) \ return r; \ - fsd = dentry->d_fsdata; \ if (fsd->real_fops) \ r = fsd->real_fops->name(args); \ else \ @@ -359,29 +384,32 @@ static ret_type full_proxy_ ## name(proto) \ FULL_PROXY_FUNC_BOTH(llseek, loff_t, filp, PROTO(struct file *filp, loff_t offset, int whence), - ARGS(filp, offset, whence)); + ARGS(filp, offset, whence), HAS_LSEEK, -ESPIPE); FULL_PROXY_FUNC_BOTH(read, ssize_t, filp, PROTO(struct file *filp, char __user *buf, size_t size, loff_t *ppos), - ARGS(filp, buf, size, ppos)); + ARGS(filp, buf, size, ppos), HAS_READ, -EINVAL); FULL_PROXY_FUNC_BOTH(write, ssize_t, filp, PROTO(struct file *filp, const char __user *buf, size_t size, loff_t *ppos), - ARGS(filp, buf, size, ppos)); + ARGS(filp, buf, size, ppos), HAS_WRITE, -EINVAL); FULL_PROXY_FUNC(unlocked_ioctl, long, filp, PROTO(struct file *filp, unsigned int cmd, unsigned long arg), - ARGS(filp, cmd, arg)); + ARGS(filp, cmd, arg), HAS_IOCTL, -ENOTTY); static __poll_t full_proxy_poll(struct file *filp, struct poll_table_struct *wait) { struct dentry *dentry = F_DENTRY(filp); + struct debugfs_fsdata *fsd = dentry->d_fsdata; __poll_t r = 0; const struct file_operations *real_fops; + if (!(fsd->methods & HAS_POLL)) + return DEFAULT_POLLMASK; if (debugfs_file_get(dentry)) return EPOLLHUP; @@ -393,9 +421,7 @@ static __poll_t full_proxy_poll(struct file *filp, static int full_proxy_release(struct inode *inode, struct file *filp) { - const struct dentry *dentry = F_DENTRY(filp); const struct file_operations *real_fops = debugfs_real_fops(filp); - const struct file_operations *proxy_fops = filp->f_op; int r = 0; /* @@ -404,49 +430,21 @@ static int full_proxy_release(struct inode *inode, struct file *filp) * not to leak any resources. Releasers must not assume that * ->i_private is still being meaningful here. 
*/ - if (real_fops && real_fops->release) + if (real_fops->release) r = real_fops->release(inode, filp); - replace_fops(filp, d_inode(dentry)->i_fop); - kfree(proxy_fops); fops_put(real_fops); return r; } -static void __full_proxy_fops_init(struct file_operations *proxy_fops, - struct debugfs_fsdata *fsd) -{ - proxy_fops->release = full_proxy_release; - - if ((fsd->real_fops && fsd->real_fops->llseek) || - (fsd->short_fops && fsd->short_fops->llseek)) - proxy_fops->llseek = full_proxy_llseek; - - if ((fsd->real_fops && fsd->real_fops->read) || - (fsd->short_fops && fsd->short_fops->read)) - proxy_fops->read = full_proxy_read; - - if ((fsd->real_fops && fsd->real_fops->write) || - (fsd->short_fops && fsd->short_fops->write)) - proxy_fops->write = full_proxy_write; - - if (fsd->real_fops && fsd->real_fops->poll) - proxy_fops->poll = full_proxy_poll; - - if (fsd->real_fops && fsd->real_fops->unlocked_ioctl) - proxy_fops->unlocked_ioctl = full_proxy_unlocked_ioctl; -} - -static int full_proxy_open(struct inode *inode, struct file *filp, - enum dbgfs_get_mode mode) +static int full_proxy_open_regular(struct inode *inode, struct file *filp) { struct dentry *dentry = F_DENTRY(filp); const struct file_operations *real_fops; - struct file_operations *proxy_fops = NULL; struct debugfs_fsdata *fsd; int r; - r = __debugfs_file_get(dentry, mode); + r = __debugfs_file_get(dentry, DBGFS_GET_REGULAR); if (r) return r == -EIO ? -ENOENT : r; @@ -456,7 +454,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp, if (r) goto out; - if (real_fops && !fops_get(real_fops)) { + if (!fops_get(real_fops)) { #ifdef CONFIG_MODULES if (real_fops->owner && real_fops->owner->state == MODULE_STATE_GOING) { @@ -472,55 +470,52 @@ static int full_proxy_open(struct inode *inode, struct file *filp, goto out; } - proxy_fops = kzalloc(sizeof(*proxy_fops), GFP_KERNEL); - if (!proxy_fops) { - r = -ENOMEM; - goto free_proxy; - } - __full_proxy_fops_init(proxy_fops, fsd); - replace_fops(filp, proxy_fops); - - if (!real_fops || real_fops->open) { - if (real_fops) - r = real_fops->open(inode, filp); - else - r = simple_open(inode, filp); + if (real_fops->open) { + r = real_fops->open(inode, filp); if (r) { - replace_fops(filp, d_inode(dentry)->i_fop); - goto free_proxy; - } else if (filp->f_op != proxy_fops) { + fops_put(real_fops); + } else if (filp->f_op != &debugfs_full_proxy_file_operations) { /* No protection against file removal anymore. */ WARN(1, "debugfs file owner replaced proxy fops: %pd", dentry); - goto free_proxy; + fops_put(real_fops); } } - - goto out; -free_proxy: - kfree(proxy_fops); - fops_put(real_fops); out: debugfs_file_put(dentry); return r; } -static int full_proxy_open_regular(struct inode *inode, struct file *filp) -{ - return full_proxy_open(inode, filp, DBGFS_GET_REGULAR); -} - const struct file_operations debugfs_full_proxy_file_operations = { .open = full_proxy_open_regular, + .release = full_proxy_release, + .llseek = full_proxy_llseek, + .read = full_proxy_read, + .write = full_proxy_write, + .poll = full_proxy_poll, + .unlocked_ioctl = full_proxy_unlocked_ioctl }; static int full_proxy_open_short(struct inode *inode, struct file *filp) { - return full_proxy_open(inode, filp, DBGFS_GET_SHORT); + struct dentry *dentry = F_DENTRY(filp); + int r; + + r = __debugfs_file_get(dentry, DBGFS_GET_SHORT); + if (r) + return r == -EIO ? 
-ENOENT : r; + r = debugfs_locked_down(inode, filp, NULL); + if (!r) + r = simple_open(inode, filp); + debugfs_file_put(dentry); + return r; } const struct file_operations debugfs_full_short_proxy_file_operations = { .open = full_proxy_open_short, + .llseek = full_proxy_llseek, + .read = full_proxy_read, + .write = full_proxy_write, }; ssize_t debugfs_attr_read(struct file *file, char __user *buf, diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index e752009de929..75715d8877ee 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -208,16 +208,34 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root) return 0; } +static struct kmem_cache *debugfs_inode_cachep __ro_after_init; + +static void init_once(void *foo) +{ + struct debugfs_inode_info *info = foo; + inode_init_once(&info->vfs_inode); +} + +static struct inode *debugfs_alloc_inode(struct super_block *sb) +{ + struct debugfs_inode_info *info; + info = alloc_inode_sb(sb, debugfs_inode_cachep, GFP_KERNEL); + if (!info) + return NULL; + return &info->vfs_inode; +} + static void debugfs_free_inode(struct inode *inode) { if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); - free_inode_nonrcu(inode); + kmem_cache_free(debugfs_inode_cachep, DEBUGFS_I(inode)); } static const struct super_operations debugfs_super_operations = { .statfs = simple_statfs, .show_options = debugfs_show_options, + .alloc_inode = debugfs_alloc_inode, .free_inode = debugfs_free_inode, }; @@ -225,23 +243,18 @@ static void debugfs_release_dentry(struct dentry *dentry) { struct debugfs_fsdata *fsd = dentry->d_fsdata; - if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) - return; - - /* check it wasn't a dir (no fsdata) or automount (no real_fops) */ - if (fsd && (fsd->real_fops || fsd->short_fops)) { + if (fsd) { WARN_ON(!list_empty(&fsd->cancellations)); mutex_destroy(&fsd->cancellations_mtx); } - kfree(fsd); } static struct vfsmount *debugfs_automount(struct path *path) { - struct debugfs_fsdata *fsd = path->dentry->d_fsdata; + struct inode *inode = path->dentry->d_inode; - return fsd->automount(path->dentry, d_inode(path->dentry)->i_private); + return DEBUGFS_I(inode)->automount(path->dentry, inode->i_private); } static const struct dentry_operations debugfs_dops = { @@ -411,6 +424,7 @@ static struct dentry *end_creating(struct dentry *dentry) static struct dentry *__debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, + const void *aux, const struct file_operations *proxy_fops, const void *real_fops) { @@ -441,9 +455,11 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, inode->i_private = data; inode->i_op = &debugfs_file_inode_operations; + if (!real_fops) + proxy_fops = &debugfs_noop_file_operations; inode->i_fop = proxy_fops; - dentry->d_fsdata = (void *)((unsigned long)real_fops | - DEBUGFS_FSDATA_IS_REAL_FOPS_BIT); + DEBUGFS_I(inode)->raw = real_fops; + DEBUGFS_I(inode)->aux = aux; d_instantiate(dentry, inode); fsnotify_create(d_inode(dentry->d_parent), dentry); @@ -452,30 +468,22 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, struct dentry *debugfs_create_file_full(const char *name, umode_t mode, struct dentry *parent, void *data, + const void *aux, const struct file_operations *fops) { - if (WARN_ON((unsigned long)fops & - DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) - return ERR_PTR(-EINVAL); - - return __debugfs_create_file(name, mode, parent, data, - fops ? 
&debugfs_full_proxy_file_operations : - &debugfs_noop_file_operations, + return __debugfs_create_file(name, mode, parent, data, aux, + &debugfs_full_proxy_file_operations, fops); } EXPORT_SYMBOL_GPL(debugfs_create_file_full); struct dentry *debugfs_create_file_short(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct debugfs_short_fops *fops) + struct dentry *parent, void *data, + const void *aux, + const struct debugfs_short_fops *fops) { - if (WARN_ON((unsigned long)fops & - DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) - return ERR_PTR(-EINVAL); - - return __debugfs_create_file(name, mode, parent, data, - fops ? &debugfs_full_short_proxy_file_operations : - &debugfs_noop_file_operations, + return __debugfs_create_file(name, mode, parent, data, aux, + &debugfs_full_short_proxy_file_operations, fops); } EXPORT_SYMBOL_GPL(debugfs_create_file_short); @@ -512,9 +520,8 @@ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, const struct file_operations *fops) { - return __debugfs_create_file(name, mode, parent, data, - fops ? &debugfs_open_proxy_file_operations : - &debugfs_noop_file_operations, + return __debugfs_create_file(name, mode, parent, data, NULL, + &debugfs_open_proxy_file_operations, fops); } EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe); @@ -624,23 +631,13 @@ struct dentry *debugfs_create_automount(const char *name, void *data) { struct dentry *dentry = start_creating(name, parent); - struct debugfs_fsdata *fsd; struct inode *inode; if (IS_ERR(dentry)) return dentry; - fsd = kzalloc(sizeof(*fsd), GFP_KERNEL); - if (!fsd) { - failed_creating(dentry); - return ERR_PTR(-ENOMEM); - } - - fsd->automount = f; - if (!(debugfs_allow & DEBUGFS_ALLOW_API)) { failed_creating(dentry); - kfree(fsd); return ERR_PTR(-EPERM); } @@ -648,14 +645,13 @@ struct dentry *debugfs_create_automount(const char *name, if (unlikely(!inode)) { pr_err("out of free dentries, can not create automount '%s'\n", name); - kfree(fsd); return failed_creating(dentry); } make_empty_dir_inode(inode); inode->i_flags |= S_AUTOMOUNT; inode->i_private = data; - dentry->d_fsdata = fsd; + DEBUGFS_I(inode)->automount = f; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); d_instantiate(dentry, inode); @@ -730,7 +726,7 @@ static void __debugfs_file_removed(struct dentry *dentry) */ smp_mb(); fsd = READ_ONCE(dentry->d_fsdata); - if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) + if (!fsd) return; /* if this was the last reference, we're done */ @@ -834,76 +830,70 @@ void debugfs_lookup_and_remove(const char *name, struct dentry *parent) EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove); /** - * debugfs_rename - rename a file/directory in the debugfs filesystem - * @old_dir: a pointer to the parent dentry for the renamed object. This - * should be a directory dentry. - * @old_dentry: dentry of an object to be renamed. - * @new_dir: a pointer to the parent dentry where the object should be - * moved. This should be a directory dentry. - * @new_name: a pointer to a string containing the target name. + * debugfs_change_name - rename a file/directory in the debugfs filesystem + * @dentry: dentry of an object to be renamed. + * @fmt: format for new name * * This function renames a file/directory in debugfs. The target must not * exist for rename to succeed. * - * This function will return a pointer to old_dentry (which is updated to - * reflect renaming) if it succeeds. If an error occurs, ERR_PTR(-ERROR) - * will be returned. 
+ * This function will return 0 on success and -E... on failure. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ -struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, - struct dentry *new_dir, const char *new_name) +int __printf(2, 3) debugfs_change_name(struct dentry *dentry, const char *fmt, ...) { - int error; - struct dentry *dentry = NULL, *trap; + int error = 0; + const char *new_name; struct name_snapshot old_name; + struct dentry *parent, *target; + struct inode *dir; + va_list ap; - if (IS_ERR(old_dir)) - return old_dir; - if (IS_ERR(new_dir)) - return new_dir; - if (IS_ERR_OR_NULL(old_dentry)) - return old_dentry; - - trap = lock_rename(new_dir, old_dir); - /* Source or destination directories don't exist? */ - if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) - goto exit; - /* Source does not exist, cyclic rename, or mountpoint? */ - if (d_really_is_negative(old_dentry) || old_dentry == trap || - d_mountpoint(old_dentry)) - goto exit; - dentry = lookup_one_len(new_name, new_dir, strlen(new_name)); - /* Lookup failed, cyclic rename or target exists? */ - if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry)) - goto exit; - - take_dentry_name_snapshot(&old_name, old_dentry); - - error = simple_rename(&nop_mnt_idmap, d_inode(old_dir), old_dentry, - d_inode(new_dir), dentry, 0); - if (error) { - release_dentry_name_snapshot(&old_name); - goto exit; + if (IS_ERR_OR_NULL(dentry)) + return 0; + + va_start(ap, fmt); + new_name = kvasprintf_const(GFP_KERNEL, fmt, ap); + va_end(ap); + if (!new_name) + return -ENOMEM; + + parent = dget_parent(dentry); + dir = d_inode(parent); + inode_lock(dir); + + take_dentry_name_snapshot(&old_name, dentry); + + if (WARN_ON_ONCE(dentry->d_parent != parent)) { + error = -EINVAL; + goto out; } - d_move(old_dentry, dentry); - fsnotify_move(d_inode(old_dir), d_inode(new_dir), &old_name.name, - d_is_dir(old_dentry), - NULL, old_dentry); + if (strcmp(old_name.name.name, new_name) == 0) + goto out; + target = lookup_one_len(new_name, parent, strlen(new_name)); + if (IS_ERR(target)) { + error = PTR_ERR(target); + goto out; + } + if (d_really_is_positive(target)) { + dput(target); + error = -EINVAL; + goto out; + } + simple_rename_timestamp(dir, dentry, dir, target); + d_move(dentry, target); + dput(target); + fsnotify_move(dir, dir, &old_name.name, d_is_dir(dentry), NULL, dentry); +out: release_dentry_name_snapshot(&old_name); - unlock_rename(new_dir, old_dir); - dput(dentry); - return old_dentry; -exit: - if (dentry && !IS_ERR(dentry)) - dput(dentry); - unlock_rename(new_dir, old_dir); - if (IS_ERR(dentry)) - return dentry; - return ERR_PTR(-EINVAL); + inode_unlock(dir); + dput(parent); + kfree_const(new_name); + return error; } -EXPORT_SYMBOL_GPL(debugfs_rename); +EXPORT_SYMBOL_GPL(debugfs_change_name); /** * debugfs_initialized - Tells whether debugfs has been registered @@ -939,12 +929,22 @@ static int __init debugfs_init(void) if (retval) return retval; - retval = register_filesystem(&debug_fs_type); - if (retval) + debugfs_inode_cachep = kmem_cache_create("debugfs_inode_cache", + sizeof(struct debugfs_inode_info), 0, + SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT, + init_once); + if (debugfs_inode_cachep == NULL) { sysfs_remove_mount_point(kernel_kobj, "debug"); - else - debugfs_registered = true; + return -ENOMEM; + } - return retval; + retval = register_filesystem(&debug_fs_type); + if (retval) { // Really not going to happen + sysfs_remove_mount_point(kernel_kobj, 
"debug"); + kmem_cache_destroy(debugfs_inode_cachep); + return retval; + } + debugfs_registered = true; + return 0; } core_initcall(debugfs_init); diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h index bbae4a228ef4..93483fe84425 100644 --- a/fs/debugfs/internal.h +++ b/fs/debugfs/internal.h @@ -11,6 +11,22 @@ struct file_operations; +struct debugfs_inode_info { + struct inode vfs_inode; + union { + const void *raw; + const struct file_operations *real_fops; + const struct debugfs_short_fops *short_fops; + debugfs_automount_t automount; + }; + const void *aux; +}; + +static inline struct debugfs_inode_info *DEBUGFS_I(struct inode *inode) +{ + return container_of(inode, struct debugfs_inode_info, vfs_inode); +} + /* declared over in file.c */ extern const struct file_operations debugfs_noop_file_operations; extern const struct file_operations debugfs_open_proxy_file_operations; @@ -20,29 +36,25 @@ extern const struct file_operations debugfs_full_short_proxy_file_operations; struct debugfs_fsdata { const struct file_operations *real_fops; const struct debugfs_short_fops *short_fops; - union { - /* automount_fn is used when real_fops is NULL */ - debugfs_automount_t automount; - struct { - refcount_t active_users; - struct completion active_users_drained; - - /* protect cancellations */ - struct mutex cancellations_mtx; - struct list_head cancellations; - }; + struct { + refcount_t active_users; + struct completion active_users_drained; + + /* protect cancellations */ + struct mutex cancellations_mtx; + struct list_head cancellations; + unsigned int methods; }; }; -/* - * A dentry's ->d_fsdata either points to the real fops or to a - * dynamically allocated debugfs_fsdata instance. - * In order to distinguish between these two cases, a real fops - * pointer gets its lowest bit set. - */ -#define DEBUGFS_FSDATA_IS_REAL_FOPS_BIT BIT(0) +enum { + HAS_READ = 1, + HAS_WRITE = 2, + HAS_LSEEK = 4, + HAS_POLL = 8, + HAS_IOCTL = 16 +}; -/* Access BITS */ #define DEBUGFS_ALLOW_API BIT(0) #define DEBUGFS_ALLOW_MOUNT BIT(1) diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig index 8674dbfbe59d..ca215a3cba3e 100644 --- a/fs/fuse/Kconfig +++ b/fs/fuse/Kconfig @@ -63,3 +63,15 @@ config FUSE_PASSTHROUGH to be performed directly on a backing file. If you want to allow passthrough operations, answer Y. + +config FUSE_IO_URING + bool "FUSE communication over io-uring" + default y + depends on FUSE_FS + depends on IO_URING + help + This allows sending FUSE requests over the io-uring interface and + also adds request core affinity. 
+ + If you want to allow fuse server/client communication through io-uring, + answer Y diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 2c372180d631..3f0f312a31c1 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -15,5 +15,6 @@ fuse-y += iomode.o fuse-$(CONFIG_FUSE_DAX) += dax.o fuse-$(CONFIG_FUSE_PASSTHROUGH) += passthrough.o fuse-$(CONFIG_SYSCTL) += sysctl.o +fuse-$(CONFIG_FUSE_IO_URING) += dev_uring.o virtiofs-y := virtio_fs.o diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 9abbc2f2894f..0b6ee6dd1fd6 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -240,11 +240,12 @@ static int fuse_send_removemapping(struct inode *inode, args.opcode = FUSE_REMOVEMAPPING; args.nodeid = fi->nodeid; - args.in_numargs = 2; - args.in_args[0].size = sizeof(*inargp); - args.in_args[0].value = inargp; - args.in_args[1].size = inargp->count * sizeof(*remove_one); - args.in_args[1].value = remove_one; + args.in_numargs = 3; + fuse_set_zero_arg0(&args); + args.in_args[1].size = sizeof(*inargp); + args.in_args[1].value = inargp; + args.in_args[2].size = inargp->count * sizeof(*remove_one); + args.in_args[2].value = remove_one; return fuse_simple_request(fm, &args); } diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 27ccae63495d..5b5f789b37eb 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -6,7 +6,9 @@ See the file COPYING. */ +#include "dev_uring_i.h" #include "fuse_i.h" +#include "fuse_dev_i.h" #include <linux/init.h> #include <linux/module.h> @@ -28,23 +30,8 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR); MODULE_ALIAS("devname:fuse"); -/* Ordinary requests have even IDs, while interrupts IDs are odd */ -#define FUSE_INT_REQ_BIT (1ULL << 0) -#define FUSE_REQ_ID_STEP (1ULL << 1) - static struct kmem_cache *fuse_req_cachep; -static void end_requests(struct list_head *head); - -static struct fuse_dev *fuse_get_dev(struct file *file) -{ - /* - * Lockless access is OK, because file->private data is set - * once during mount and is valid until the file is released. 
- */ - return READ_ONCE(file->private_data); -} - static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req) { INIT_LIST_HEAD(&req->list); @@ -89,7 +76,8 @@ void fuse_set_initialized(struct fuse_conn *fc) static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) { - return !fc->initialized || (for_background && fc->blocked); + return !fc->initialized || (for_background && fc->blocked) || + (fc->io_uring && !fuse_uring_ready(fc)); } static void fuse_drop_waiting(struct fuse_conn *fc) @@ -234,7 +222,7 @@ u64 fuse_get_unique(struct fuse_iqueue *fiq) } EXPORT_SYMBOL_GPL(fuse_get_unique); -static unsigned int fuse_req_hash(u64 unique) +unsigned int fuse_req_hash(u64 unique) { return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS); } @@ -250,7 +238,8 @@ __releases(fiq->lock) spin_unlock(&fiq->lock); } -static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget) +void fuse_dev_queue_forget(struct fuse_iqueue *fiq, + struct fuse_forget_link *forget) { spin_lock(&fiq->lock); if (fiq->connected) { @@ -263,7 +252,7 @@ static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_li } } -static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) +void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) { spin_lock(&fiq->lock); if (list_empty(&req->intr_entry)) { @@ -580,7 +569,25 @@ ssize_t __fuse_simple_request(struct mnt_idmap *idmap, return ret; } -static bool fuse_request_queue_background(struct fuse_req *req) +#ifdef CONFIG_FUSE_IO_URING +static bool fuse_request_queue_background_uring(struct fuse_conn *fc, + struct fuse_req *req) +{ + struct fuse_iqueue *fiq = &fc->iq; + + req->in.h.unique = fuse_get_unique(fiq); + req->in.h.len = sizeof(struct fuse_in_header) + + fuse_len_args(req->args->in_numargs, + (struct fuse_arg *) req->args->in_args); + + return fuse_uring_queue_bq_req(req); +} +#endif + +/* + * @return true if queued + */ +static int fuse_request_queue_background(struct fuse_req *req) { struct fuse_mount *fm = req->fm; struct fuse_conn *fc = fm->fc; @@ -592,6 +599,12 @@ static bool fuse_request_queue_background(struct fuse_req *req) atomic_inc(&fc->num_waiting); } __set_bit(FR_ISREPLY, &req->flags); + +#ifdef CONFIG_FUSE_IO_URING + if (fuse_uring_ready(fc)) + return fuse_request_queue_background_uring(fc, req); +#endif + spin_lock(&fc->bg_lock); if (likely(fc->connected)) { fc->num_background++; @@ -692,22 +705,8 @@ static int unlock_request(struct fuse_req *req) return err; } -struct fuse_copy_state { - int write; - struct fuse_req *req; - struct iov_iter *iter; - struct pipe_buffer *pipebufs; - struct pipe_buffer *currbuf; - struct pipe_inode_info *pipe; - unsigned long nr_segs; - struct page *pg; - unsigned len; - unsigned offset; - unsigned move_pages:1; -}; - -static void fuse_copy_init(struct fuse_copy_state *cs, int write, - struct iov_iter *iter) +void fuse_copy_init(struct fuse_copy_state *cs, int write, + struct iov_iter *iter) { memset(cs, 0, sizeof(*cs)); cs->write = write; @@ -814,6 +813,9 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) *size -= ncpy; cs->len -= ncpy; cs->offset += ncpy; + if (cs->is_uring) + cs->ring.copied_sz += ncpy; + return ncpy; } @@ -1068,9 +1070,9 @@ static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size) } /* Copy request arguments to/from userspace buffer */ -static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, - unsigned argpages, struct 
fuse_arg *args, - int zeroing) +int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, + unsigned argpages, struct fuse_arg *args, + int zeroing) { int err = 0; unsigned i; @@ -1760,7 +1762,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, args = &ap->args; args->nodeid = outarg->nodeid; args->opcode = FUSE_NOTIFY_REPLY; - args->in_numargs = 2; + args->in_numargs = 3; args->in_pages = true; args->end = fuse_retrieve_end; @@ -1788,9 +1790,10 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, } ra->inarg.offset = outarg->offset; ra->inarg.size = total_len; - args->in_args[0].size = sizeof(ra->inarg); - args->in_args[0].value = &ra->inarg; - args->in_args[1].size = total_len; + fuse_set_zero_arg0(args); + args->in_args[1].size = sizeof(ra->inarg); + args->in_args[1].value = &ra->inarg; + args->in_args[2].size = total_len; err = fuse_simple_notify_reply(fm, args, outarg->notify_unique); if (err) @@ -1885,7 +1888,7 @@ static void fuse_resend(struct fuse_conn *fc) spin_unlock(&fiq->lock); list_for_each_entry(req, &to_queue, list) clear_bit(FR_PENDING, &req->flags); - end_requests(&to_queue); + fuse_dev_end_requests(&to_queue); return; } /* iq and pq requests are both oldest to newest */ @@ -1934,7 +1937,7 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, } /* Look up request on processing list by unique ID */ -static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique) +struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique) { unsigned int hash = fuse_req_hash(unique); struct fuse_req *req; @@ -1946,10 +1949,17 @@ static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique) return NULL; } -static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args, - unsigned nbytes) +int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args, + unsigned nbytes) { - unsigned reqsize = sizeof(struct fuse_out_header); + + unsigned int reqsize = 0; + + /* + * Uring has all headers separated from args - args is payload only + */ + if (!cs->is_uring) + reqsize = sizeof(struct fuse_out_header); reqsize += fuse_len_args(args->out_numargs, args->out_args); @@ -2011,7 +2021,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, spin_lock(&fpq->lock); req = NULL; if (fpq->connected) - req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT); + req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT); err = -ENOENT; if (!req) { @@ -2049,7 +2059,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, if (oh.error) err = nbytes != sizeof(oh) ? 
-EINVAL : 0; else - err = copy_out_args(cs, req->args, nbytes); + err = fuse_copy_out_args(cs, req->args, nbytes); fuse_copy_finish(cs); spin_lock(&fpq->lock); @@ -2204,7 +2214,7 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait) } /* Abort all requests on the given list (pending or processing) */ -static void end_requests(struct list_head *head) +void fuse_dev_end_requests(struct list_head *head) { while (!list_empty(head)) { struct fuse_req *req; @@ -2307,7 +2317,13 @@ void fuse_abort_conn(struct fuse_conn *fc) wake_up_all(&fc->blocked_waitq); spin_unlock(&fc->lock); - end_requests(&to_end); + fuse_dev_end_requests(&to_end); + + /* + * fc->lock must not be taken to avoid conflicts with io-uring + * locks + */ + fuse_uring_abort(fc); } else { spin_unlock(&fc->lock); } @@ -2319,6 +2335,8 @@ void fuse_wait_aborted(struct fuse_conn *fc) /* matches implicit memory barrier in fuse_drop_waiting() */ smp_mb(); wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); + + fuse_uring_wait_stopped_queues(fc); } int fuse_dev_release(struct inode *inode, struct file *file) @@ -2337,7 +2355,7 @@ int fuse_dev_release(struct inode *inode, struct file *file) list_splice_init(&fpq->processing[i], &to_end); spin_unlock(&fpq->lock); - end_requests(&to_end); + fuse_dev_end_requests(&to_end); /* Are we the last open device? */ if (atomic_dec_and_test(&fc->dev_count)) { @@ -2475,6 +2493,9 @@ const struct file_operations fuse_dev_operations = { .fasync = fuse_dev_fasync, .unlocked_ioctl = fuse_dev_ioctl, .compat_ioctl = compat_ptr_ioctl, +#ifdef CONFIG_FUSE_IO_URING + .uring_cmd = fuse_uring_cmd, +#endif }; EXPORT_SYMBOL_GPL(fuse_dev_operations); diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c new file mode 100644 index 000000000000..ebd2931b4f2a --- /dev/null +++ b/fs/fuse/dev_uring.c @@ -0,0 +1,1319 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * FUSE: Filesystem in Userspace + * Copyright (c) 2023-2024 DataDirect Networks. + */ + +#include "fuse_i.h" +#include "dev_uring_i.h" +#include "fuse_dev_i.h" + +#include <linux/fs.h> +#include <linux/io_uring/cmd.h> + +static bool __read_mostly enable_uring; +module_param(enable_uring, bool, 0644); +MODULE_PARM_DESC(enable_uring, + "Enable userspace communication through io-uring"); + +#define FUSE_URING_IOV_SEGS 2 /* header and payload */ + + +bool fuse_uring_enabled(void) +{ + return enable_uring; +} + +struct fuse_uring_pdu { + struct fuse_ring_ent *ent; +}; + +static const struct fuse_iqueue_ops fuse_io_uring_ops; + +static void uring_cmd_set_ring_ent(struct io_uring_cmd *cmd, + struct fuse_ring_ent *ring_ent) +{ + struct fuse_uring_pdu *pdu = + io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu); + + pdu->ent = ring_ent; +} + +static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd) +{ + struct fuse_uring_pdu *pdu = + io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu); + + return pdu->ent; +} + +static void fuse_uring_flush_bg(struct fuse_ring_queue *queue) +{ + struct fuse_ring *ring = queue->ring; + struct fuse_conn *fc = ring->fc; + + lockdep_assert_held(&queue->lock); + lockdep_assert_held(&fc->bg_lock); + + /* + * Allow one bg request per queue, ignoring global fc limits. + * This prevents a single queue from consuming all resources and + * eliminates the need for remote queue wake-ups when global + * limits are met but this queue has no more waiting requests. 
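+	 * That way each queue always makes progress: even when the global + * active limit is reached, a queue without an active background + * request may still activate one.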
+	 */ + while ((fc->active_background < fc->max_background || + !queue->active_background) && + (!list_empty(&queue->fuse_req_bg_queue))) { + struct fuse_req *req; + + req = list_first_entry(&queue->fuse_req_bg_queue, + struct fuse_req, list); + fc->active_background++; + queue->active_background++; + + list_move_tail(&req->list, &queue->fuse_req_queue); + } +} + +static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req, + int error) +{ + struct fuse_ring_queue *queue = ent->queue; + struct fuse_ring *ring = queue->ring; + struct fuse_conn *fc = ring->fc; + + lockdep_assert_not_held(&queue->lock); + spin_lock(&queue->lock); + ent->fuse_req = NULL; + if (test_bit(FR_BACKGROUND, &req->flags)) { + queue->active_background--; + spin_lock(&fc->bg_lock); + fuse_uring_flush_bg(queue); + spin_unlock(&fc->bg_lock); + } + + spin_unlock(&queue->lock); + + if (error) + req->out.h.error = error; + + clear_bit(FR_SENT, &req->flags); + fuse_request_end(req); +} + +/* Abort all requests queued on the given ring queue */ +static void fuse_uring_abort_end_queue_requests(struct fuse_ring_queue *queue) +{ + struct fuse_req *req; + LIST_HEAD(req_list); + + spin_lock(&queue->lock); + list_for_each_entry(req, &queue->fuse_req_queue, list) + clear_bit(FR_PENDING, &req->flags); + list_splice_init(&queue->fuse_req_queue, &req_list); + spin_unlock(&queue->lock); + + /* must not hold queue lock to avoid order issues with fi->lock */ + fuse_dev_end_requests(&req_list); +} + +void fuse_uring_abort_end_requests(struct fuse_ring *ring) +{ + int qid; + struct fuse_ring_queue *queue; + struct fuse_conn *fc = ring->fc; + + for (qid = 0; qid < ring->nr_queues; qid++) { + queue = READ_ONCE(ring->queues[qid]); + if (!queue) + continue; + + queue->stopped = true; + + WARN_ON_ONCE(ring->fc->max_background != UINT_MAX); + spin_lock(&queue->lock); + spin_lock(&fc->bg_lock); + fuse_uring_flush_bg(queue); + spin_unlock(&fc->bg_lock); + spin_unlock(&queue->lock); + fuse_uring_abort_end_queue_requests(queue); + } +} + +void fuse_uring_destruct(struct fuse_conn *fc) +{ + struct fuse_ring *ring = fc->ring; + int qid; + + if (!ring) + return; + + for (qid = 0; qid < ring->nr_queues; qid++) { + struct fuse_ring_queue *queue = ring->queues[qid]; + struct fuse_ring_ent *ent, *next; + + if (!queue) + continue; + + WARN_ON(!list_empty(&queue->ent_avail_queue)); + WARN_ON(!list_empty(&queue->ent_w_req_queue)); + WARN_ON(!list_empty(&queue->ent_commit_queue)); + WARN_ON(!list_empty(&queue->ent_in_userspace)); + + list_for_each_entry_safe(ent, next, &queue->ent_released, + list) { + list_del_init(&ent->list); + kfree(ent); + } + + kfree(queue->fpq.processing); + kfree(queue); + ring->queues[qid] = NULL; + } + + kfree(ring->queues); + kfree(ring); + fc->ring = NULL; +} + +/* + * Basic ring setup for this connection based on the provided configuration + */ +static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc) +{ + struct fuse_ring *ring; + size_t nr_queues = num_possible_cpus(); + struct fuse_ring *res = NULL; + size_t max_payload_size; + + ring = kzalloc(sizeof(*fc->ring), GFP_KERNEL_ACCOUNT); + if (!ring) + return NULL; + + ring->queues = kcalloc(nr_queues, sizeof(struct fuse_ring_queue *), + GFP_KERNEL_ACCOUNT); + if (!ring->queues) + goto out_err; + + max_payload_size = max(FUSE_MIN_READ_BUFFER, fc->max_write); + max_payload_size = max(max_payload_size, fc->max_pages * PAGE_SIZE); + + spin_lock(&fc->lock); + if (fc->ring) { + /* race, another thread created the ring in the meantime */ +
spin_unlock(&fc->lock); + res = fc->ring; + goto out_err; + } + + init_waitqueue_head(&ring->stop_waitq); + + fc->ring = ring; + ring->nr_queues = nr_queues; + ring->fc = fc; + ring->max_payload_sz = max_payload_size; + atomic_set(&ring->queue_refs, 0); + + spin_unlock(&fc->lock); + return ring; + +out_err: + kfree(ring->queues); + kfree(ring); + return res; +} + +static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring, + int qid) +{ + struct fuse_conn *fc = ring->fc; + struct fuse_ring_queue *queue; + struct list_head *pq; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL_ACCOUNT); + if (!queue) + return NULL; + pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL); + if (!pq) { + kfree(queue); + return NULL; + } + + queue->qid = qid; + queue->ring = ring; + spin_lock_init(&queue->lock); + + INIT_LIST_HEAD(&queue->ent_avail_queue); + INIT_LIST_HEAD(&queue->ent_commit_queue); + INIT_LIST_HEAD(&queue->ent_w_req_queue); + INIT_LIST_HEAD(&queue->ent_in_userspace); + INIT_LIST_HEAD(&queue->fuse_req_queue); + INIT_LIST_HEAD(&queue->fuse_req_bg_queue); + INIT_LIST_HEAD(&queue->ent_released); + + queue->fpq.processing = pq; + fuse_pqueue_init(&queue->fpq); + + spin_lock(&fc->lock); + if (ring->queues[qid]) { + spin_unlock(&fc->lock); + kfree(queue->fpq.processing); + kfree(queue); + return ring->queues[qid]; + } + + /* + * write_once and lock as the caller mostly doesn't take the lock at all + */ + WRITE_ONCE(ring->queues[qid], queue); + spin_unlock(&fc->lock); + + return queue; +} + +static void fuse_uring_stop_fuse_req_end(struct fuse_req *req) +{ + clear_bit(FR_SENT, &req->flags); + req->out.h.error = -ECONNABORTED; + fuse_request_end(req); +} + +/* + * Release a request/entry on connection tear down + */ +static void fuse_uring_entry_teardown(struct fuse_ring_ent *ent) +{ + struct fuse_req *req; + struct io_uring_cmd *cmd; + + struct fuse_ring_queue *queue = ent->queue; + + spin_lock(&queue->lock); + cmd = ent->cmd; + ent->cmd = NULL; + req = ent->fuse_req; + ent->fuse_req = NULL; + if (req) { + /* remove entry from queue->fpq->processing */ + list_del_init(&req->list); + } + + /* + * The entry must not be freed immediately, due to access of direct + * pointer access of entries through IO_URING_F_CANCEL - there is a risk + * of race between daemon termination (which triggers IO_URING_F_CANCEL + * and accesses entries without checking the list state first + */ + list_move(&ent->list, &queue->ent_released); + ent->state = FRRS_RELEASED; + spin_unlock(&queue->lock); + + if (cmd) + io_uring_cmd_done(cmd, -ENOTCONN, 0, IO_URING_F_UNLOCKED); + + if (req) + fuse_uring_stop_fuse_req_end(req); +} + +static void fuse_uring_stop_list_entries(struct list_head *head, + struct fuse_ring_queue *queue, + enum fuse_ring_req_state exp_state) +{ + struct fuse_ring *ring = queue->ring; + struct fuse_ring_ent *ent, *next; + ssize_t queue_refs = SSIZE_MAX; + LIST_HEAD(to_teardown); + + spin_lock(&queue->lock); + list_for_each_entry_safe(ent, next, head, list) { + if (ent->state != exp_state) { + pr_warn("entry teardown qid=%d state=%d expected=%d", + queue->qid, ent->state, exp_state); + continue; + } + + ent->state = FRRS_TEARDOWN; + list_move(&ent->list, &to_teardown); + } + spin_unlock(&queue->lock); + + /* no queue lock to avoid lock order issues */ + list_for_each_entry_safe(ent, next, &to_teardown, list) { + fuse_uring_entry_teardown(ent); + queue_refs = atomic_dec_return(&ring->queue_refs); + WARN_ON_ONCE(queue_refs < 0); + } +} + +static void 
fuse_uring_teardown_entries(struct fuse_ring_queue *queue) +{ + fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue, + FRRS_USERSPACE); + fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue, + FRRS_AVAILABLE); +} + +/* + * Log state debug info + */ +static void fuse_uring_log_ent_state(struct fuse_ring *ring) +{ + int qid; + struct fuse_ring_ent *ent; + + for (qid = 0; qid < ring->nr_queues; qid++) { + struct fuse_ring_queue *queue = ring->queues[qid]; + + if (!queue) + continue; + + spin_lock(&queue->lock); + /* + * Log entries from the intermediate queue, the other queues + * should be empty + */ + list_for_each_entry(ent, &queue->ent_w_req_queue, list) { + pr_info(" ent-req-queue ring=%p qid=%d ent=%p state=%d\n", + ring, qid, ent, ent->state); + } + list_for_each_entry(ent, &queue->ent_commit_queue, list) { + pr_info(" ent-commit-queue ring=%p qid=%d ent=%p state=%d\n", + ring, qid, ent, ent->state); + } + spin_unlock(&queue->lock); + } + ring->stop_debug_log = 1; +} + +static void fuse_uring_async_stop_queues(struct work_struct *work) +{ + int qid; + struct fuse_ring *ring = + container_of(work, struct fuse_ring, async_teardown_work.work); + + /* XXX code dup */ + for (qid = 0; qid < ring->nr_queues; qid++) { + struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); + + if (!queue) + continue; + + fuse_uring_teardown_entries(queue); + } + + /* + * Some ring entries might be in the middle of IO operations, + * i.e. in process to get handled by file_operations::uring_cmd + * or on the way to userspace - we could handle that with conditions in + * run time code, but easier/cleaner to have an async tear down handler + * If there are still queue references left + */ + if (atomic_read(&ring->queue_refs) > 0) { + if (time_after(jiffies, + ring->teardown_time + FUSE_URING_TEARDOWN_TIMEOUT)) + fuse_uring_log_ent_state(ring); + + schedule_delayed_work(&ring->async_teardown_work, + FUSE_URING_TEARDOWN_INTERVAL); + } else { + wake_up_all(&ring->stop_waitq); + } +} + +/* + * Stop the ring queues + */ +void fuse_uring_stop_queues(struct fuse_ring *ring) +{ + int qid; + + for (qid = 0; qid < ring->nr_queues; qid++) { + struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); + + if (!queue) + continue; + + fuse_uring_teardown_entries(queue); + } + + if (atomic_read(&ring->queue_refs) > 0) { + ring->teardown_time = jiffies; + INIT_DELAYED_WORK(&ring->async_teardown_work, + fuse_uring_async_stop_queues); + schedule_delayed_work(&ring->async_teardown_work, + FUSE_URING_TEARDOWN_INTERVAL); + } else { + wake_up_all(&ring->stop_waitq); + } +} + +/* + * Handle IO_URING_F_CANCEL, typically should come on daemon termination. 
+ * + * Releasing the last entry should trigger fuse_dev_release() if + * the daemon was terminated + */ +static void fuse_uring_cancel(struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd); + struct fuse_ring_queue *queue; + bool need_cmd_done = false; + + /* + * direct access on ent - it must not be destructed as long as + * IO_URING_F_CANCEL might come up + */ + queue = ent->queue; + spin_lock(&queue->lock); + if (ent->state == FRRS_AVAILABLE) { + ent->state = FRRS_USERSPACE; + list_move(&ent->list, &queue->ent_in_userspace); + need_cmd_done = true; + ent->cmd = NULL; + } + spin_unlock(&queue->lock); + + if (need_cmd_done) { + /* no queue lock to avoid lock order issues */ + io_uring_cmd_done(cmd, -ENOTCONN, 0, issue_flags); + } +} + +static void fuse_uring_prepare_cancel(struct io_uring_cmd *cmd, int issue_flags, + struct fuse_ring_ent *ring_ent) +{ + uring_cmd_set_ring_ent(cmd, ring_ent); + io_uring_cmd_mark_cancelable(cmd, issue_flags); +} + +/* + * Check for errors and store them in the request + */ +static int fuse_uring_out_header_has_err(struct fuse_out_header *oh, + struct fuse_req *req, + struct fuse_conn *fc) +{ + int err; + + err = -EINVAL; + if (oh->unique == 0) { + /* Not supported through io-uring yet */ + pr_warn_once("notify through fuse-io-uring not supported\n"); + goto err; + } + + if (oh->error <= -ERESTARTSYS || oh->error > 0) + goto err; + + if (oh->error) { + err = oh->error; + goto err; + } + + err = -ENOENT; + if ((oh->unique & ~FUSE_INT_REQ_BIT) != req->in.h.unique) { + pr_warn_ratelimited("unique mismatch, expected: %llu got %llu\n", + req->in.h.unique, + oh->unique & ~FUSE_INT_REQ_BIT); + goto err; + } + + /* + * Is it an interrupt reply ID? + * XXX: Not supported through fuse-io-uring yet, it should not even + * find the request - should not happen. + */ + WARN_ON_ONCE(oh->unique & FUSE_INT_REQ_BIT); + + err = 0; +err: + return err; +} + +static int fuse_uring_copy_from_ring(struct fuse_ring *ring, + struct fuse_req *req, + struct fuse_ring_ent *ent) +{ + struct fuse_copy_state cs; + struct fuse_args *args = req->args; + struct iov_iter iter; + int err; + struct fuse_uring_ent_in_out ring_in_out; + + err = copy_from_user(&ring_in_out, &ent->headers->ring_ent_in_out, + sizeof(ring_in_out)); + if (err) + return -EFAULT; + + err = import_ubuf(ITER_SOURCE, ent->payload, ring->max_payload_sz, + &iter); + if (err) + return err; + + fuse_copy_init(&cs, 0, &iter); + cs.is_uring = 1; + cs.req = req; + + return fuse_copy_out_args(&cs, args, ring_in_out.payload_sz); +} + +/* + * Copy data from the req to the ring buffer + */ +static int fuse_uring_args_to_ring(struct fuse_ring *ring, struct fuse_req *req, + struct fuse_ring_ent *ent) +{ + struct fuse_copy_state cs; + struct fuse_args *args = req->args; + struct fuse_in_arg *in_args = args->in_args; + int num_args = args->in_numargs; + int err; + struct iov_iter iter; + struct fuse_uring_ent_in_out ent_in_out = { + .flags = 0, + .commit_id = req->in.h.unique, + }; + + err = import_ubuf(ITER_DEST, ent->payload, ring->max_payload_sz, &iter); + if (err) { + pr_info_ratelimited("fuse: Import of user buffer failed\n"); + return err; + } + + fuse_copy_init(&cs, 1, &iter); + cs.is_uring = 1; + cs.req = req; + + if (num_args > 0) { + /* + * Expectation is that the first argument is the per-op header. + * Some opcodes have a zero-sized header.
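+		 * (opcodes converted to fuse_set_zero_arg0(), e.g. FUSE_UNLINK, + * send an empty in_args[0])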
+ */ + if (args->in_args[0].size > 0) { + err = copy_to_user(&ent->headers->op_in, in_args->value, + in_args->size); + if (err) { + pr_info_ratelimited( + "Copying the header failed.\n"); + return -EFAULT; + } + } + in_args++; + num_args--; + } + + /* copy the payload */ + err = fuse_copy_args(&cs, num_args, args->in_pages, + (struct fuse_arg *)in_args, 0); + if (err) { + pr_info_ratelimited("%s fuse_copy_args failed\n", __func__); + return err; + } + + ent_in_out.payload_sz = cs.ring.copied_sz; + err = copy_to_user(&ent->headers->ring_ent_in_out, &ent_in_out, + sizeof(ent_in_out)); + return err ? -EFAULT : 0; +} + +static int fuse_uring_copy_to_ring(struct fuse_ring_ent *ent, + struct fuse_req *req) +{ + struct fuse_ring_queue *queue = ent->queue; + struct fuse_ring *ring = queue->ring; + int err; + + err = -EIO; + if (WARN_ON(ent->state != FRRS_FUSE_REQ)) { + pr_err("qid=%d ring-req=%p invalid state %d on send\n", + queue->qid, ent, ent->state); + return err; + } + + err = -EINVAL; + if (WARN_ON(req->in.h.unique == 0)) + return err; + + /* copy the request */ + err = fuse_uring_args_to_ring(ring, req, ent); + if (unlikely(err)) { + pr_info_ratelimited("Copy to ring failed: %d\n", err); + return err; + } + + /* copy fuse_in_header */ + err = copy_to_user(&ent->headers->in_out, &req->in.h, + sizeof(req->in.h)); + if (err) { + err = -EFAULT; + return err; + } + + return 0; +} + +static int fuse_uring_prepare_send(struct fuse_ring_ent *ent, + struct fuse_req *req) +{ + int err; + + err = fuse_uring_copy_to_ring(ent, req); + if (!err) + set_bit(FR_SENT, &req->flags); + else + fuse_uring_req_end(ent, req, err); + + return err; +} + +/* + * Write data to the ring buffer and send the request to userspace, + * userspace will read it + * This is comparable with classical read(/dev/fuse) + */ +static int fuse_uring_send_next_to_ring(struct fuse_ring_ent *ent, + struct fuse_req *req, + unsigned int issue_flags) +{ + struct fuse_ring_queue *queue = ent->queue; + int err; + struct io_uring_cmd *cmd; + + err = fuse_uring_prepare_send(ent, req); + if (err) + return err; + + spin_lock(&queue->lock); + cmd = ent->cmd; + ent->cmd = NULL; + ent->state = FRRS_USERSPACE; + list_move(&ent->list, &queue->ent_in_userspace); + spin_unlock(&queue->lock); + + io_uring_cmd_done(cmd, 0, 0, issue_flags); + return 0; +} + +/* + * Make a ring entry available for fuse_req assignment + */ +static void fuse_uring_ent_avail(struct fuse_ring_ent *ent, + struct fuse_ring_queue *queue) +{ + WARN_ON_ONCE(!ent->cmd); + list_move(&ent->list, &queue->ent_avail_queue); + ent->state = FRRS_AVAILABLE; +} + +/* Used to find the request on SQE commit */ +static void fuse_uring_add_to_pq(struct fuse_ring_ent *ent, + struct fuse_req *req) +{ + struct fuse_ring_queue *queue = ent->queue; + struct fuse_pqueue *fpq = &queue->fpq; + unsigned int hash; + + req->ring_entry = ent; + hash = fuse_req_hash(req->in.h.unique); + list_move_tail(&req->list, &fpq->processing[hash]); +} + +/* + * Assign a fuse queue entry to the given entry + */ +static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent, + struct fuse_req *req) +{ + struct fuse_ring_queue *queue = ent->queue; + struct fuse_conn *fc = req->fm->fc; + struct fuse_iqueue *fiq = &fc->iq; + + lockdep_assert_held(&queue->lock); + + if (WARN_ON_ONCE(ent->state != FRRS_AVAILABLE && + ent->state != FRRS_COMMIT)) { + pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid, + ent->state); + } + + spin_lock(&fiq->lock); + clear_bit(FR_PENDING, &req->flags); + spin_unlock(&fiq->lock); + 
ent->fuse_req = req; + ent->state = FRRS_FUSE_REQ; + list_move(&ent->list, &queue->ent_w_req_queue); + fuse_uring_add_to_pq(ent, req); +} + +/* Fetch the next fuse request if available */ +static struct fuse_req *fuse_uring_ent_assign_req(struct fuse_ring_ent *ent) + __must_hold(&queue->lock) +{ + struct fuse_req *req; + struct fuse_ring_queue *queue = ent->queue; + struct list_head *req_queue = &queue->fuse_req_queue; + + lockdep_assert_held(&queue->lock); + + /* get and assign the next request while still holding the lock */ + req = list_first_entry_or_null(req_queue, struct fuse_req, list); + if (req) + fuse_uring_add_req_to_ring_ent(ent, req); + + return req; +} + +/* + * Read data from the ring buffer, which user space has written to. + * This is comparable to handling a classical write(/dev/fuse). + * Also make the ring request available again for new fuse requests. + */ +static void fuse_uring_commit(struct fuse_ring_ent *ent, struct fuse_req *req, + unsigned int issue_flags) +{ + struct fuse_ring *ring = ent->queue->ring; + struct fuse_conn *fc = ring->fc; + ssize_t err = 0; + + err = copy_from_user(&req->out.h, &ent->headers->in_out, + sizeof(req->out.h)); + if (err) { + req->out.h.error = -EFAULT; + goto out; + } + + err = fuse_uring_out_header_has_err(&req->out.h, req, fc); + if (err) { + /* req->out.h.error already set */ + goto out; + } + + err = fuse_uring_copy_from_ring(ring, req, ent); +out: + fuse_uring_req_end(ent, req, err); +} + +/* + * Get the next fuse req and send it + */ +static void fuse_uring_next_fuse_req(struct fuse_ring_ent *ent, + struct fuse_ring_queue *queue, + unsigned int issue_flags) +{ + int err; + struct fuse_req *req; + +retry: + spin_lock(&queue->lock); + fuse_uring_ent_avail(ent, queue); + req = fuse_uring_ent_assign_req(ent); + spin_unlock(&queue->lock); + + if (req) { + err = fuse_uring_send_next_to_ring(ent, req, issue_flags); + if (err) + goto retry; + } +} + +static int fuse_ring_ent_set_commit(struct fuse_ring_ent *ent) +{ + struct fuse_ring_queue *queue = ent->queue; + + lockdep_assert_held(&queue->lock); + + if (WARN_ON_ONCE(ent->state != FRRS_USERSPACE)) + return -EIO; + + ent->state = FRRS_COMMIT; + list_move(&ent->list, &queue->ent_commit_queue); + + return 0; +} + +/* FUSE_URING_CMD_COMMIT_AND_FETCH handler */ +static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags, + struct fuse_conn *fc) +{ + const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe); + struct fuse_ring_ent *ent; + int err; + struct fuse_ring *ring = fc->ring; + struct fuse_ring_queue *queue; + uint64_t commit_id = READ_ONCE(cmd_req->commit_id); + unsigned int qid = READ_ONCE(cmd_req->qid); + struct fuse_pqueue *fpq; + struct fuse_req *req; + + err = -ENOTCONN; + if (!ring) + return err; + + if (qid >= ring->nr_queues) + return -EINVAL; + + queue = ring->queues[qid]; + if (!queue) + return err; + fpq = &queue->fpq; + + if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped)) + return err; + + spin_lock(&queue->lock); + /* Find the request based on the unique ID of the fuse request. + * This should get revised, as it needs a hash calculation and list + * search. A full struct fuse_pqueue is needed (memory overhead), + * as well as the link from req to ring_ent.
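+	 * commit_id currently is the unique id of the fuse request, set in + * fuse_uring_args_to_ring() and echoed back by the daemon on commit.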
+ */ + req = fuse_request_find(fpq, commit_id); + err = -ENOENT; + if (!req) { + pr_info("qid=%d commit_id %llu not found\n", queue->qid, + commit_id); + spin_unlock(&queue->lock); + return err; + } + list_del_init(&req->list); + ent = req->ring_entry; + req->ring_entry = NULL; + + err = fuse_ring_ent_set_commit(ent); + if (err != 0) { + pr_info_ratelimited("qid=%d commit_id %llu state %d", + queue->qid, commit_id, ent->state); + spin_unlock(&queue->lock); + req->out.h.error = err; + clear_bit(FR_SENT, &req->flags); + fuse_request_end(req); + return err; + } + + ent->cmd = cmd; + spin_unlock(&queue->lock); + + /* without the queue lock, as other locks are taken */ + fuse_uring_prepare_cancel(cmd, issue_flags, ent); + fuse_uring_commit(ent, req, issue_flags); + + /* + * Fetching the next request is absolutely required as queued + * fuse requests would otherwise not get processed - committing + * and fetching is done in one step vs legacy fuse, which has separated + * read (fetch request) and write (commit result). + */ + fuse_uring_next_fuse_req(ent, queue, issue_flags); + return 0; +} + +static bool is_ring_ready(struct fuse_ring *ring, int current_qid) +{ + int qid; + struct fuse_ring_queue *queue; + bool ready = true; + + for (qid = 0; qid < ring->nr_queues && ready; qid++) { + if (current_qid == qid) + continue; + + queue = ring->queues[qid]; + if (!queue) { + ready = false; + break; + } + + spin_lock(&queue->lock); + if (list_empty(&queue->ent_avail_queue)) + ready = false; + spin_unlock(&queue->lock); + } + + return ready; +} + +/* + * fuse_uring_req_fetch command handling + */ +static void fuse_uring_do_register(struct fuse_ring_ent *ent, + struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + struct fuse_ring_queue *queue = ent->queue; + struct fuse_ring *ring = queue->ring; + struct fuse_conn *fc = ring->fc; + struct fuse_iqueue *fiq = &fc->iq; + + fuse_uring_prepare_cancel(cmd, issue_flags, ent); + + spin_lock(&queue->lock); + ent->cmd = cmd; + fuse_uring_ent_avail(ent, queue); + spin_unlock(&queue->lock); + + if (!ring->ready) { + bool ready = is_ring_ready(ring, queue->qid); + + if (ready) { + WRITE_ONCE(fiq->ops, &fuse_io_uring_ops); + WRITE_ONCE(ring->ready, true); + wake_up_all(&fc->blocked_waitq); + } + } +} + +/* + * sqe->addr is a ptr to an iovec array, iov[0] has the headers, iov[1] + * the payload + */ +static int fuse_uring_get_iovec_from_sqe(const struct io_uring_sqe *sqe, + struct iovec iov[FUSE_URING_IOV_SEGS]) +{ + struct iovec __user *uiov = u64_to_user_ptr(READ_ONCE(sqe->addr)); + struct iov_iter iter; + ssize_t ret; + + if (sqe->len != FUSE_URING_IOV_SEGS) + return -EINVAL; + + /* + * Direction for buffer access will actually be READ and WRITE, + * using write for the import should include READ access as well. 
+ */ + ret = import_iovec(WRITE, uiov, FUSE_URING_IOV_SEGS, + FUSE_URING_IOV_SEGS, &iov, &iter); + if (ret < 0) + return ret; + + return 0; +} + +static struct fuse_ring_ent * +fuse_uring_create_ring_ent(struct io_uring_cmd *cmd, + struct fuse_ring_queue *queue) +{ + struct fuse_ring *ring = queue->ring; + struct fuse_ring_ent *ent; + size_t payload_size; + struct iovec iov[FUSE_URING_IOV_SEGS]; + int err; + + err = fuse_uring_get_iovec_from_sqe(cmd->sqe, iov); + if (err) { + pr_info_ratelimited("Failed to get iovec from sqe, err=%d\n", + err); + return ERR_PTR(err); + } + + err = -EINVAL; + if (iov[0].iov_len < sizeof(struct fuse_uring_req_header)) { + pr_info_ratelimited("Invalid header len %zu\n", iov[0].iov_len); + return ERR_PTR(err); + } + + payload_size = iov[1].iov_len; + if (payload_size < ring->max_payload_sz) { + pr_info_ratelimited("Invalid req payload len %zu\n", + payload_size); + return ERR_PTR(err); + } + + err = -ENOMEM; + ent = kzalloc(sizeof(*ent), GFP_KERNEL_ACCOUNT); + if (!ent) + return ERR_PTR(err); + + INIT_LIST_HEAD(&ent->list); + + ent->queue = queue; + ent->headers = iov[0].iov_base; + ent->payload = iov[1].iov_base; + + atomic_inc(&ring->queue_refs); + return ent; +} + +/* + * Register header and payload buffer with the kernel and puts the + * entry as "ready to get fuse requests" on the queue + */ +static int fuse_uring_register(struct io_uring_cmd *cmd, + unsigned int issue_flags, struct fuse_conn *fc) +{ + const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe); + struct fuse_ring *ring = fc->ring; + struct fuse_ring_queue *queue; + struct fuse_ring_ent *ent; + int err; + unsigned int qid = READ_ONCE(cmd_req->qid); + + err = -ENOMEM; + if (!ring) { + ring = fuse_uring_create(fc); + if (!ring) + return err; + } + + if (qid >= ring->nr_queues) { + pr_info_ratelimited("fuse: Invalid ring qid %u\n", qid); + return -EINVAL; + } + + queue = ring->queues[qid]; + if (!queue) { + queue = fuse_uring_create_queue(ring, qid); + if (!queue) + return err; + } + + /* + * The created queue above does not need to be destructed in + * case of entry errors below, will be done at ring destruction time. 
+ */ + + ent = fuse_uring_create_ring_ent(cmd, queue); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + fuse_uring_do_register(ent, cmd, issue_flags); + + return 0; +} + +/* + * Entry function from io_uring to handle the given passthrough command + * (op code IORING_OP_URING_CMD) + */ +int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + struct fuse_dev *fud; + struct fuse_conn *fc; + u32 cmd_op = cmd->cmd_op; + int err; + + if ((unlikely(issue_flags & IO_URING_F_CANCEL))) { + fuse_uring_cancel(cmd, issue_flags); + return 0; + } + + /* This extra SQE size holds struct fuse_uring_cmd_req */ + if (!(issue_flags & IO_URING_F_SQE128)) + return -EINVAL; + + fud = fuse_get_dev(cmd->file); + if (!fud) { + pr_info_ratelimited("No fuse device found\n"); + return -ENOTCONN; + } + fc = fud->fc; + + /* Once a connection has io-uring enabled on it, it can't be disabled */ + if (!enable_uring && !fc->io_uring) { + pr_info_ratelimited("fuse-io-uring is disabled\n"); + return -EOPNOTSUPP; + } + + if (fc->aborted) + return -ECONNABORTED; + if (!fc->connected) + return -ENOTCONN; + + /* + * fuse_uring_register() needs the ring to be initialized, + * we need to know the max payload size + */ + if (!fc->initialized) + return -EAGAIN; + + switch (cmd_op) { + case FUSE_IO_URING_CMD_REGISTER: + err = fuse_uring_register(cmd, issue_flags, fc); + if (err) { + pr_info_once("FUSE_IO_URING_CMD_REGISTER failed err=%d\n", + err); + fc->io_uring = 0; + wake_up_all(&fc->blocked_waitq); + return err; + } + break; + case FUSE_IO_URING_CMD_COMMIT_AND_FETCH: + err = fuse_uring_commit_fetch(cmd, issue_flags, fc); + if (err) { + pr_info_once("FUSE_IO_URING_COMMIT_AND_FETCH failed err=%d\n", + err); + return err; + } + break; + default: + return -EINVAL; + } + + return -EIOCBQUEUED; +} + +static void fuse_uring_send(struct fuse_ring_ent *ent, struct io_uring_cmd *cmd, + ssize_t ret, unsigned int issue_flags) +{ + struct fuse_ring_queue *queue = ent->queue; + + spin_lock(&queue->lock); + ent->state = FRRS_USERSPACE; + list_move(&ent->list, &queue->ent_in_userspace); + ent->cmd = NULL; + spin_unlock(&queue->lock); + + io_uring_cmd_done(cmd, ret, 0, issue_flags); +} + +/* + * This prepares and sends the ring request in fuse-uring task context. + * User buffers are not mapped yet - the application does not have permission + * to write to it - this has to be executed in ring task context. 
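+ * fuse_uring_dispatch_ent() schedules it through + * io_uring_cmd_complete_in_task().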
+ */ +static void fuse_uring_send_in_task(struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd); + struct fuse_ring_queue *queue = ent->queue; + int err; + + if (!(issue_flags & IO_URING_F_TASK_DEAD)) { + err = fuse_uring_prepare_send(ent, ent->fuse_req); + if (err) { + fuse_uring_next_fuse_req(ent, queue, issue_flags); + return; + } + } else { + err = -ECANCELED; + } + + fuse_uring_send(ent, cmd, err, issue_flags); +} + +static struct fuse_ring_queue *fuse_uring_task_to_queue(struct fuse_ring *ring) +{ + unsigned int qid; + struct fuse_ring_queue *queue; + + qid = task_cpu(current); + + if (WARN_ONCE(qid >= ring->nr_queues, + "Core number (%u) exceeds nr queues (%zu)\n", qid, + ring->nr_queues)) + qid = 0; + + queue = ring->queues[qid]; + WARN_ONCE(!queue, "Missing queue for qid %d\n", qid); + + return queue; +} + +static void fuse_uring_dispatch_ent(struct fuse_ring_ent *ent) +{ + struct io_uring_cmd *cmd = ent->cmd; + + uring_cmd_set_ring_ent(cmd, ent); + io_uring_cmd_complete_in_task(cmd, fuse_uring_send_in_task); +} + +/* queue a fuse request and send it if a ring entry is available */ +void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req) +{ + struct fuse_conn *fc = req->fm->fc; + struct fuse_ring *ring = fc->ring; + struct fuse_ring_queue *queue; + struct fuse_ring_ent *ent = NULL; + int err; + + err = -EINVAL; + queue = fuse_uring_task_to_queue(ring); + if (!queue) + goto err; + + if (req->in.h.opcode != FUSE_NOTIFY_REPLY) + req->in.h.unique = fuse_get_unique(fiq); + + spin_lock(&queue->lock); + err = -ENOTCONN; + if (unlikely(queue->stopped)) + goto err_unlock; + + ent = list_first_entry_or_null(&queue->ent_avail_queue, + struct fuse_ring_ent, list); + if (ent) + fuse_uring_add_req_to_ring_ent(ent, req); + else + list_add_tail(&req->list, &queue->fuse_req_queue); + spin_unlock(&queue->lock); + + if (ent) + fuse_uring_dispatch_ent(ent); + + return; + +err_unlock: + spin_unlock(&queue->lock); +err: + req->out.h.error = err; + clear_bit(FR_PENDING, &req->flags); + fuse_request_end(req); +} + +bool fuse_uring_queue_bq_req(struct fuse_req *req) +{ + struct fuse_conn *fc = req->fm->fc; + struct fuse_ring *ring = fc->ring; + struct fuse_ring_queue *queue; + struct fuse_ring_ent *ent = NULL; + + queue = fuse_uring_task_to_queue(ring); + if (!queue) + return false; + + spin_lock(&queue->lock); + if (unlikely(queue->stopped)) { + spin_unlock(&queue->lock); + return false; + } + + list_add_tail(&req->list, &queue->fuse_req_bg_queue); + + ent = list_first_entry_or_null(&queue->ent_avail_queue, + struct fuse_ring_ent, list); + spin_lock(&fc->bg_lock); + fc->num_background++; + if (fc->num_background == fc->max_background) + fc->blocked = 1; + fuse_uring_flush_bg(queue); + spin_unlock(&fc->bg_lock); + + /* + * Due to bg_queue flush limits there might be other bg requests + * in the queue that need to be handled first. Or no further req + * might be available. 
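+	 * Hence pick from the foreground queue that fuse_uring_flush_bg() + * moves requests to, instead of sending the bg request directly.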
+	 */ + req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req, + list); + if (ent && req) { + fuse_uring_add_req_to_ring_ent(ent, req); + spin_unlock(&queue->lock); + + fuse_uring_dispatch_ent(ent); + } else { + spin_unlock(&queue->lock); + } + + return true; +} + +static const struct fuse_iqueue_ops fuse_io_uring_ops = { + /* should be sent over io-uring as an enhancement */ + .send_forget = fuse_dev_queue_forget, + + /* + * could be sent over io-uring, but interrupts should be rare, + * no need to make the code complex + */ + .send_interrupt = fuse_dev_queue_interrupt, + .send_req = fuse_uring_queue_fuse_req, +}; diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h new file mode 100644 index 000000000000..2102b3d0c1ae --- /dev/null +++ b/fs/fuse/dev_uring_i.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * FUSE: Filesystem in Userspace + * Copyright (c) 2023-2024 DataDirect Networks. + */ + +#ifndef _FS_FUSE_DEV_URING_I_H +#define _FS_FUSE_DEV_URING_I_H + +#include "fuse_i.h" + +#ifdef CONFIG_FUSE_IO_URING + +#define FUSE_URING_TEARDOWN_TIMEOUT (5 * HZ) +#define FUSE_URING_TEARDOWN_INTERVAL (HZ/20) + +enum fuse_ring_req_state { + FRRS_INVALID = 0, + + /* The ring entry was received from userspace and is being processed */ + FRRS_COMMIT, + + /* The ring entry is waiting for new fuse requests */ + FRRS_AVAILABLE, + + /* The ring entry got assigned a fuse req */ + FRRS_FUSE_REQ, + + /* The ring entry is in or on the way to user space */ + FRRS_USERSPACE, + + /* The ring entry is in teardown */ + FRRS_TEARDOWN, + + /* The ring entry is released, but not freed yet */ + FRRS_RELEASED, +}; + +/** A fuse ring entry, part of the ring queue */ +struct fuse_ring_ent { + /* userspace buffer */ + struct fuse_uring_req_header __user *headers; + void __user *payload; + + /* the ring queue that owns the request */ + struct fuse_ring_queue *queue; + + /* fields below are protected by queue->lock */ + + struct io_uring_cmd *cmd; + + struct list_head list; + + enum fuse_ring_req_state state; + + struct fuse_req *fuse_req; +}; + +struct fuse_ring_queue { + /* + * back pointer to the main fuse uring structure that holds this + * queue + */ + struct fuse_ring *ring; + + /* queue id, corresponds to the cpu core */ + unsigned int qid; + + /* + * queue lock, taken when any value in the queue changes _and_ also + * when a ring entry state changes. + */ + spinlock_t lock; + + /* available ring entries (struct fuse_ring_ent) */ + struct list_head ent_avail_queue; + + /* + * entries in the process of being committed or in the process + * of being sent to userspace + */ + struct list_head ent_w_req_queue; + struct list_head ent_commit_queue; + + /* entries in userspace */ + struct list_head ent_in_userspace; + + /* entries that are released */ + struct list_head ent_released; + + /* fuse requests waiting for an entry slot */ + struct list_head fuse_req_queue; + + /* background fuse requests */ + struct list_head fuse_req_bg_queue; + + struct fuse_pqueue fpq; + + unsigned int active_background; + + bool stopped; +}; + +/** + * Describes whether uring is used for communication and holds all the data + * needed for uring communication + */ +struct fuse_ring { + /* back pointer */ + struct fuse_conn *fc; + + /* number of ring queues */ + size_t nr_queues; + + /* maximum payload/arg size */ + size_t max_payload_sz; + + struct fuse_ring_queue **queues; + + /* + * Log ring entry states on stop when entries cannot be released + */ + unsigned int stop_debug_log : 1; + + wait_queue_head_t stop_waitq; + + /* async tear down */ + struct delayed_work async_teardown_work; + + /* log */ + unsigned long teardown_time; + + atomic_t queue_refs; + + bool ready; +}; + +bool fuse_uring_enabled(void); +void fuse_uring_destruct(struct fuse_conn *fc); +void fuse_uring_stop_queues(struct fuse_ring *ring); +void fuse_uring_abort_end_requests(struct fuse_ring *ring); +int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags); +void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req); +bool fuse_uring_queue_bq_req(struct fuse_req *req); + +static inline void fuse_uring_abort(struct fuse_conn *fc) +{ + struct fuse_ring *ring = fc->ring; + + if (ring == NULL) + return; + + if (atomic_read(&ring->queue_refs) > 0) { + fuse_uring_abort_end_requests(ring); + fuse_uring_stop_queues(ring); + } +} + +static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc) +{ + struct fuse_ring *ring = fc->ring; + + if (ring) + wait_event(ring->stop_waitq, + atomic_read(&ring->queue_refs) == 0); +} + +static inline bool fuse_uring_ready(struct fuse_conn *fc) +{ + return fc->ring && fc->ring->ready; +} + +#else /* CONFIG_FUSE_IO_URING */ + +struct fuse_ring; + +static inline void fuse_uring_create(struct fuse_conn *fc) +{ +} + +static inline void fuse_uring_destruct(struct fuse_conn *fc) +{ +} + +static inline bool fuse_uring_enabled(void) +{ + return false; +} + +static inline void fuse_uring_abort(struct fuse_conn *fc) +{ +} + +static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc) +{ +} + +static inline bool fuse_uring_ready(struct fuse_conn *fc) +{ + return false; +} + +#endif /* CONFIG_FUSE_IO_URING */ + +#endif /* _FS_FUSE_DEV_URING_I_H */ diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index bf057cf7098d..be693a8a1010 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -175,9 +175,10 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args, memset(outarg, 0, sizeof(struct fuse_entry_out)); args->opcode = FUSE_LOOKUP; args->nodeid = nodeid; - args->in_numargs = 1; - args->in_args[0].size = name->len + 1; - args->in_args[0].value = name->name; + args->in_numargs = 2; + fuse_set_zero_arg0(args); + args->in_args[1].size = name->len + 1; + args->in_args[1].value = name->name; args->out_numargs = 1; args->out_args[0].size = sizeof(struct fuse_entry_out); args->out_args[0].value = outarg; @@ -929,11 +930,12 @@ static int
fuse_symlink(struct mnt_idmap *idmap, struct inode *dir, FUSE_ARGS(args); args.opcode = FUSE_SYMLINK; - args.in_numargs = 2; - args.in_args[0].size = entry->d_name.len + 1; - args.in_args[0].value = entry->d_name.name; - args.in_args[1].size = len; - args.in_args[1].value = link; + args.in_numargs = 3; + fuse_set_zero_arg0(&args); + args.in_args[1].size = entry->d_name.len + 1; + args.in_args[1].value = entry->d_name.name; + args.in_args[2].size = len; + args.in_args[2].value = link; return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK); } @@ -993,9 +995,10 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry) args.opcode = FUSE_UNLINK; args.nodeid = get_node_id(dir); - args.in_numargs = 1; - args.in_args[0].size = entry->d_name.len + 1; - args.in_args[0].value = entry->d_name.name; + args.in_numargs = 2; + fuse_set_zero_arg0(&args); + args.in_args[1].size = entry->d_name.len + 1; + args.in_args[1].value = entry->d_name.name; err = fuse_simple_request(fm, &args); if (!err) { fuse_dir_changed(dir); @@ -1016,9 +1019,10 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry) args.opcode = FUSE_RMDIR; args.nodeid = get_node_id(dir); - args.in_numargs = 1; - args.in_args[0].size = entry->d_name.len + 1; - args.in_args[0].value = entry->d_name.name; + args.in_numargs = 2; + fuse_set_zero_arg0(&args); + args.in_args[1].size = entry->d_name.len + 1; + args.in_args[1].value = entry->d_name.name; err = fuse_simple_request(fm, &args); if (!err) { fuse_dir_changed(dir); diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h new file mode 100644 index 000000000000..3b2bfe1248d3 --- /dev/null +++ b/fs/fuse/fuse_dev_i.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * FUSE: Filesystem in Userspace + * Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> + */ +#ifndef _FS_FUSE_DEV_I_H +#define _FS_FUSE_DEV_I_H + +#include <linux/types.h> + +/* Ordinary requests have even IDs, while interrupts IDs are odd */ +#define FUSE_INT_REQ_BIT (1ULL << 0) +#define FUSE_REQ_ID_STEP (1ULL << 1) + +struct fuse_arg; +struct fuse_args; +struct fuse_pqueue; +struct fuse_req; +struct fuse_iqueue; +struct fuse_forget_link; + +struct fuse_copy_state { + int write; + struct fuse_req *req; + struct iov_iter *iter; + struct pipe_buffer *pipebufs; + struct pipe_buffer *currbuf; + struct pipe_inode_info *pipe; + unsigned long nr_segs; + struct page *pg; + unsigned int len; + unsigned int offset; + unsigned int move_pages:1; + unsigned int is_uring:1; + struct { + unsigned int copied_sz; /* copied size into the user buffer */ + } ring; +}; + +static inline struct fuse_dev *fuse_get_dev(struct file *file) +{ + /* + * Lockless access is OK, because file->private data is set + * once during mount and is valid until the file is released. 
+ */ + return READ_ONCE(file->private_data); +} + +unsigned int fuse_req_hash(u64 unique); +struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique); + +void fuse_dev_end_requests(struct list_head *head); + +void fuse_copy_init(struct fuse_copy_state *cs, int write, + struct iov_iter *iter); +int fuse_copy_args(struct fuse_copy_state *cs, unsigned int numargs, + unsigned int argpages, struct fuse_arg *args, + int zeroing); +int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args, + unsigned int nbytes); +void fuse_dev_queue_forget(struct fuse_iqueue *fiq, + struct fuse_forget_link *forget); +void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req); + +#endif + diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 74744c6f2860..fee96fe7887b 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -310,7 +310,7 @@ struct fuse_args { bool is_ext:1; bool is_pinned:1; bool invalidate_vmap:1; - struct fuse_in_arg in_args[3]; + struct fuse_in_arg in_args[4]; struct fuse_arg out_args[2]; void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error); /* Used for kvec iter backed by vmalloc address */ @@ -438,6 +438,10 @@ struct fuse_req { /** fuse_mount this request belongs to */ struct fuse_mount *fm; + +#ifdef CONFIG_FUSE_IO_URING + void *ring_entry; +#endif }; struct fuse_iqueue; @@ -863,6 +867,9 @@ struct fuse_conn { /* Use pages instead of pointer for kernel I/O */ unsigned int use_pages_for_kvec_io:1; + /* Use io_uring for communication */ + unsigned int io_uring; + /** Maximum stack depth for passthrough backing files */ int max_stack_depth; @@ -923,6 +930,11 @@ struct fuse_conn { /** IDR for backing files ids */ struct idr backing_files_map; #endif + +#ifdef CONFIG_FUSE_IO_URING + /** uring connection information*/ + struct fuse_ring *ring; +#endif }; /* @@ -947,6 +959,19 @@ struct fuse_mount { struct rcu_head rcu; }; +/* + * Empty header for FUSE opcodes without specific header needs. + * Used as a placeholder in args->in_args[0] for consistency + * across all FUSE operations, simplifying request handling. 
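+ * Example: fuse_unlink() now sets in_numargs = 2, with the zero-sized + * header in in_args[0] and the entry name in in_args[1].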
+ */ +struct fuse_zero_header {}; + +static inline void fuse_set_zero_arg0(struct fuse_args *args) +{ + args->in_args[0].size = sizeof(struct fuse_zero_header); + args->in_args[0].value = NULL; +} + static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb) { return sb->s_fs_info; @@ -1220,6 +1245,11 @@ void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o); struct fuse_conn *fuse_conn_get(struct fuse_conn *fc); /** + * Initialize the fuse processing queue + */ +void fuse_pqueue_init(struct fuse_pqueue *fpq); + +/** * Initialize fuse_conn */ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm, diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 3ce4f4e81d09..e9db2cb8c150 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -7,6 +7,7 @@ */ #include "fuse_i.h" +#include "dev_uring_i.h" #include <linux/pagemap.h> #include <linux/slab.h> @@ -937,7 +938,7 @@ static void fuse_iqueue_init(struct fuse_iqueue *fiq, fiq->priv = priv; } -static void fuse_pqueue_init(struct fuse_pqueue *fpq) +void fuse_pqueue_init(struct fuse_pqueue *fpq) { unsigned int i; @@ -992,6 +993,8 @@ static void delayed_release(struct rcu_head *p) { struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu); + fuse_uring_destruct(fc); + put_user_ns(fc->user_ns); fc->release(fc); } @@ -1387,6 +1390,8 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, else ok = false; } + if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled()) + fc->io_uring = 1; } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; @@ -1446,6 +1451,13 @@ void fuse_send_init(struct fuse_mount *fm) if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH)) flags |= FUSE_PASSTHROUGH; + /* + * This is just an information flag for fuse server. No need to check + * the reply - server is either sending IORING_OP_URING_CMD or not. 
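+	 * (a server that opts in simply starts sending + * FUSE_IO_URING_CMD_REGISTER SQEs, handled by fuse_uring_register())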
+ */ + if (fuse_uring_enabled()) + flags |= FUSE_OVER_IO_URING; + ia->in.flags = flags; ia->in.flags2 = flags >> 32; diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c index 9f568d345c51..93dfb06b6cea 100644 --- a/fs/fuse/xattr.c +++ b/fs/fuse/xattr.c @@ -164,9 +164,10 @@ int fuse_removexattr(struct inode *inode, const char *name) args.opcode = FUSE_REMOVEXATTR; args.nodeid = get_node_id(inode); - args.in_numargs = 1; - args.in_args[0].size = strlen(name) + 1; - args.in_args[0].value = name; + args.in_numargs = 2; + fuse_set_zero_arg0(&args); + args.in_args[1].size = strlen(name) + 1; + args.in_args[1].value = name; err = fuse_simple_request(fm, &args); if (err == -ENOSYS) { fm->fc->no_removexattr = 1; diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 0eb20012792f..d3f76101ad4b 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -170,7 +170,8 @@ config ROOT_NFS config NFS_FSCACHE bool "Provide NFS client caching support" - depends on NFS_FS=m && NETFS_SUPPORT || NFS_FS=y && NETFS_SUPPORT=y + depends on NFS_FS + select NETFS_SUPPORT select FSCACHE help Say Y here if you want NFS data to be cached locally on disc through diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 7832fb0369a1..8397c43358bd 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -718,7 +718,7 @@ __be32 nfs4_callback_offload(void *data, void *dummy, copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL); if (!copy) - return htonl(NFS4ERR_SERVERFAULT); + return cpu_to_be32(NFS4ERR_DELAY); spin_lock(&cps->clp->cl_lock); rcu_read_lock(); diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 550ca934c9cf..3b0918ade53c 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -38,7 +38,7 @@ #include <linux/sunrpc/bc_xprt.h> #include <linux/nsproxy.h> #include <linux/pid_namespace.h> - +#include <linux/nfslocalio.h> #include "nfs4_fs.h" #include "callback.h" @@ -186,7 +186,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init) seqlock_init(&clp->cl_boot_lock); ktime_get_real_ts64(&clp->cl_nfssvc_boot); nfs_uuid_init(&clp->cl_uuid); - spin_lock_init(&clp->cl_localio_lock); + INIT_WORK(&clp->cl_local_probe_work, nfs_local_probe_async_work); #endif /* CONFIG_NFS_LOCALIO */ clp->cl_principal = "*"; @@ -244,7 +244,7 @@ static void pnfs_init_server(struct nfs_server *server) */ void nfs_free_client(struct nfs_client *clp) { - nfs_local_disable(clp); + nfs_localio_disable_client(clp); /* -EIO all pending I/O */ if (!IS_ERR(clp->cl_rpcclient)) diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index b08dbe96bc57..f45beea92d03 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -303,6 +303,7 @@ static void nfs_read_sync_pgio_error(struct list_head *head, int error) static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr) { get_dreq(hdr->dreq); + set_bit(NFS_IOHDR_ODIRECT, &hdr->flags); } static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = { diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index f78115c6c2c1..98b45b636be3 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -164,18 +164,17 @@ decode_name(struct xdr_stream *xdr, u32 *id) } static struct nfsd_file * -ff_local_open_fh(struct nfs_client *clp, const struct cred *cred, +ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx, + struct nfs_client *clp, const struct cred *cred, struct nfs_fh *fh, fmode_t mode) { - if (mode & FMODE_WRITE) { - /* - * Always request read and write access since this 
corresponds - * to a rw layout. - */ - mode |= FMODE_READ; - } +#if IS_ENABLED(CONFIG_NFS_LOCALIO) + struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx); - return nfs_local_open_fh(clp, cred, fh, mode); + return nfs_local_open_fh(clp, cred, fh, &mirror->nfl, mode); +#else + return NULL; +#endif } static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1, @@ -247,6 +246,7 @@ static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags) spin_lock_init(&mirror->lock); refcount_set(&mirror->ref, 1); INIT_LIST_HEAD(&mirror->mirrors); + nfs_localio_file_init(&mirror->nfl); } return mirror; } @@ -257,6 +257,7 @@ static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror) ff_layout_remove_mirror(mirror); kfree(mirror->fh_versions); + nfs_close_local_fh(&mirror->nfl); cred = rcu_access_pointer(mirror->ro_cred); put_cred(cred); cred = rcu_access_pointer(mirror->rw_cred); @@ -847,6 +848,9 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs4_pnfs_ds *ds; u32 ds_idx; + if (NFS_SERVER(pgio->pg_inode)->flags & + (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) + pgio->pg_maxretrans = io_maxretrans; retry: pnfs_generic_pg_check_layout(pgio, req); /* Use full layout for now */ @@ -860,6 +864,8 @@ retry: if (!pgio->pg_lseg) goto out_nolseg; } + /* Reset wb_nio, since getting layout segment was successful */ + req->wb_nio = 0; ds = ff_layout_get_ds_for_read(pgio, &ds_idx); if (!ds) { @@ -876,14 +882,24 @@ retry: pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; pgio->pg_mirror_idx = ds_idx; - - if (NFS_SERVER(pgio->pg_inode)->flags & - (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) - pgio->pg_maxretrans = io_maxretrans; return; out_nolseg: - if (pgio->pg_error < 0) - return; + if (pgio->pg_error < 0) { + if (pgio->pg_error != -EAGAIN) + return; + /* Retry getting layout segment if lower layer returned -EAGAIN */ + if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) { + if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR) + pgio->pg_error = -ETIMEDOUT; + else + pgio->pg_error = -EIO; + return; + } + pgio->pg_error = 0; + /* Sleep for 1 second before retrying */ + ssleep(1); + goto retry; + } out_mds: trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode, 0, NFS4_MAX_UINT64, IOMODE_READ, @@ -1820,7 +1836,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr) hdr->mds_offset = offset; /* Start IO accounting for local read */ - localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ); + localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh, FMODE_READ); if (localio) { hdr->task.tk_start = ktime_get(); ff_layout_read_record_layoutstats_start(&hdr->task, hdr); @@ -1896,7 +1912,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) hdr->args.offset = offset; /* Start IO accounting for local write */ - localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, + localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh, FMODE_READ|FMODE_WRITE); if (localio) { hdr->task.tk_start = ktime_get(); @@ -1981,7 +1997,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how) data->args.fh = fh; /* Start IO accounting for local commit */ - localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, + localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh, FMODE_READ|FMODE_WRITE); if (localio) { data->task.tk_start = ktime_get(); diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h index f84b3fb0dddd..095df09017a5 100644 --- 
a/fs/nfs/flexfilelayout/flexfilelayout.h +++ b/fs/nfs/flexfilelayout/flexfilelayout.h @@ -83,6 +83,7 @@ struct nfs4_ff_layout_mirror { nfs4_stateid stateid; const struct cred __rcu *ro_cred; const struct cred __rcu *rw_cred; + struct nfs_file_localio nfl; refcount_t ref; spinlock_t lock; unsigned long flags; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 596f35170137..1aa67fca69b2 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1137,6 +1137,8 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, ctx->lock_context.open_context = ctx; INIT_LIST_HEAD(&ctx->list); ctx->mdsthreshold = NULL; + nfs_localio_file_init(&ctx->nfl); + return ctx; } EXPORT_SYMBOL_GPL(alloc_nfs_open_context); @@ -1168,6 +1170,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync) nfs_sb_deactive(sb); put_rpccred(rcu_dereference_protected(ctx->ll_cred, 1)); kfree(ctx->mdsthreshold); + nfs_close_local_fh(&ctx->nfl); kfree_rcu(ctx, rcu_head); } diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index e564bd11ba60..fae2c7ae4acc 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -455,11 +455,13 @@ extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode); #if IS_ENABLED(CONFIG_NFS_LOCALIO) /* localio.c */ -extern void nfs_local_disable(struct nfs_client *); extern void nfs_local_probe(struct nfs_client *); +extern void nfs_local_probe_async(struct nfs_client *); +extern void nfs_local_probe_async_work(struct work_struct *); extern struct nfsd_file *nfs_local_open_fh(struct nfs_client *, const struct cred *, struct nfs_fh *, + struct nfs_file_localio *, const fmode_t); extern int nfs_local_doio(struct nfs_client *, struct nfsd_file *, @@ -471,11 +473,12 @@ extern int nfs_local_commit(struct nfsd_file *, extern bool nfs_server_is_local(const struct nfs_client *clp); #else /* CONFIG_NFS_LOCALIO */ -static inline void nfs_local_disable(struct nfs_client *clp) {} static inline void nfs_local_probe(struct nfs_client *clp) {} +static inline void nfs_local_probe_async(struct nfs_client *clp) {} static inline struct nfsd_file * nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, - struct nfs_fh *fh, const fmode_t mode) + struct nfs_fh *fh, struct nfs_file_localio *nfl, + const fmode_t mode) { return NULL; } diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c index 4b8618cf114c..5c21caeae075 100644 --- a/fs/nfs/localio.c +++ b/fs/nfs/localio.c @@ -35,6 +35,7 @@ struct nfs_local_kiocb { struct bio_vec *bvec; struct nfs_pgio_header *hdr; struct work_struct work; + void (*aio_complete_work)(struct work_struct *); struct nfsd_file *localio; }; @@ -48,9 +49,14 @@ struct nfs_local_fsync_ctx { static bool localio_enabled __read_mostly = true; module_param(localio_enabled, bool, 0644); +static bool localio_O_DIRECT_semantics __read_mostly = false; +module_param(localio_O_DIRECT_semantics, bool, 0644); +MODULE_PARM_DESC(localio_O_DIRECT_semantics, + "LOCALIO will use O_DIRECT semantics to filesystem."); + static inline bool nfs_client_is_local(const struct nfs_client *clp) { - return !!test_bit(NFS_CS_LOCAL_IO, &clp->cl_flags); + return !!rcu_access_pointer(clp->cl_uuid.net); } bool nfs_server_is_local(const struct nfs_client *clp) @@ -116,30 +122,6 @@ const struct rpc_program nfslocalio_program = { }; /* - * nfs_local_enable - enable local i/o for an nfs_client - */ -static void nfs_local_enable(struct nfs_client *clp) -{ - spin_lock(&clp->cl_localio_lock); - set_bit(NFS_CS_LOCAL_IO, &clp->cl_flags); - trace_nfs_local_enable(clp); - 
spin_unlock(&clp->cl_localio_lock); -} - -/* - * nfs_local_disable - disable local i/o for an nfs_client - */ -void nfs_local_disable(struct nfs_client *clp) -{ - spin_lock(&clp->cl_localio_lock); - if (test_and_clear_bit(NFS_CS_LOCAL_IO, &clp->cl_flags)) { - trace_nfs_local_disable(clp); - nfs_uuid_invalidate_one_client(&clp->cl_uuid); - } - spin_unlock(&clp->cl_localio_lock); -} - -/* * nfs_init_localioclient - Initialise an NFS localio client connection */ static struct rpc_clnt *nfs_init_localioclient(struct nfs_client *clp) @@ -178,7 +160,7 @@ static bool nfs_server_uuid_is_local(struct nfs_client *clp) rpc_shutdown_client(rpcclient_localio); /* Server is only local if it initialized required struct members */ - if (status || !clp->cl_uuid.net || !clp->cl_uuid.dom) + if (status || !rcu_access_pointer(clp->cl_uuid.net) || !clp->cl_uuid.dom) return false; return true; @@ -194,44 +176,64 @@ void nfs_local_probe(struct nfs_client *clp) /* Disallow localio if disabled via sysfs or AUTH_SYS isn't used */ if (!localio_enabled || clp->cl_rpcclient->cl_auth->au_flavor != RPC_AUTH_UNIX) { - nfs_local_disable(clp); + nfs_localio_disable_client(clp); return; } if (nfs_client_is_local(clp)) { /* If already enabled, disable and re-enable */ - nfs_local_disable(clp); + nfs_localio_disable_client(clp); } if (!nfs_uuid_begin(&clp->cl_uuid)) return; if (nfs_server_uuid_is_local(clp)) - nfs_local_enable(clp); + nfs_localio_enable_client(clp); nfs_uuid_end(&clp->cl_uuid); } EXPORT_SYMBOL_GPL(nfs_local_probe); +void nfs_local_probe_async_work(struct work_struct *work) +{ + struct nfs_client *clp = + container_of(work, struct nfs_client, cl_local_probe_work); + + nfs_local_probe(clp); +} + +void nfs_local_probe_async(struct nfs_client *clp) +{ + queue_work(nfsiod_workqueue, &clp->cl_local_probe_work); +} +EXPORT_SYMBOL_GPL(nfs_local_probe_async); + +static inline struct nfsd_file *nfs_local_file_get(struct nfsd_file *nf) +{ + return nfs_to->nfsd_file_get(nf); +} + +static inline void nfs_local_file_put(struct nfsd_file *nf) +{ + nfs_to->nfsd_file_put(nf); +} + /* - * nfs_local_open_fh - open a local filehandle in terms of nfsd_file + * __nfs_local_open_fh - open a local filehandle in terms of nfsd_file. * - * Returns a pointer to a struct nfsd_file or NULL + * Returns a pointer to a struct nfsd_file or ERR_PTR. + * Caller must release returned nfsd_file with nfs_to_nfsd_file_put_local(). */ -struct nfsd_file * -nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, - struct nfs_fh *fh, const fmode_t mode) +static struct nfsd_file * +__nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, + struct nfs_fh *fh, struct nfs_file_localio *nfl, + const fmode_t mode) { struct nfsd_file *localio; - int status; - - if (!nfs_server_is_local(clp)) - return NULL; - if (mode & ~(FMODE_READ | FMODE_WRITE)) - return NULL; localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient, - cred, fh, mode); + cred, fh, nfl, mode); if (IS_ERR(localio)) { - status = PTR_ERR(localio); + int status = PTR_ERR(localio); trace_nfs_local_open_fh(fh, mode, status); switch (status) { case -ENOMEM: @@ -240,10 +242,59 @@ nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, /* Revalidate localio, will disable if unsupported */ nfs_local_probe(clp); } - return NULL; } return localio; } + +/* + * nfs_local_open_fh - open a local filehandle in terms of nfsd_file. + * First check whether an open nfsd_file is already cached; otherwise call + * __nfs_local_open_fh() and insert the new nfsd_file in nfs_file_localio.
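The cache-or-create step described in this comment is a publish-once pattern: readers fetch the cached pointer locklessly, the first opener publishes its freshly opened file under a lock, and a racing loser drops its duplicate. A minimal userspace model of just that pattern (a sketch, not the kernel code: the names are hypothetical, a mutex stands in for the client spinlock, and C11 atomics stand in for RCU):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	struct local_file { int fd; };	/* stand-in for struct nfsd_file */

	static _Atomic(struct local_file *) cached;
	static pthread_mutex_t publish_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct local_file *open_slow(void)
	{
		struct local_file *f = malloc(sizeof(*f));

		if (f)
			f->fd = -1;	/* a real open would happen here */
		return f;
	}

	struct local_file *get_local_file(void)
	{
		struct local_file *f, *new = NULL;

		f = atomic_load(&cached);
		if (!f) {
			new = open_slow();
			if (!new)
				return NULL;
			pthread_mutex_lock(&publish_lock);
			f = atomic_load(&cached);
			if (!f) {
				/* won the race: publish our file */
				atomic_store(&cached, new);
				f = new;
				new = NULL;
			}
			pthread_mutex_unlock(&publish_lock);
			if (new)
				free(new);	/* lost the race: drop the duplicate */
		}
		return f;
	}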
+ * + * Returns a pointer to a struct nfsd_file or NULL. + */ +struct nfsd_file * +nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, + struct nfs_fh *fh, struct nfs_file_localio *nfl, + const fmode_t mode) +{ + struct nfsd_file *nf, *new, __rcu **pnf; + + if (!nfs_server_is_local(clp)) + return NULL; + if (mode & ~(FMODE_READ | FMODE_WRITE)) + return NULL; + + if (mode & FMODE_WRITE) + pnf = &nfl->rw_file; + else + pnf = &nfl->ro_file; + + new = NULL; + rcu_read_lock(); + nf = rcu_dereference(*pnf); + if (!nf) { + rcu_read_unlock(); + new = __nfs_local_open_fh(clp, cred, fh, nfl, mode); + if (IS_ERR(new)) + return NULL; + /* try to swap in the pointer */ + spin_lock(&clp->cl_uuid.lock); + nf = rcu_dereference_protected(*pnf, 1); + if (!nf) { + nf = new; + new = NULL; + rcu_assign_pointer(*pnf, nf); + } + spin_unlock(&clp->cl_uuid.lock); + rcu_read_lock(); + } + nf = nfs_local_file_get(nf); + rcu_read_unlock(); + if (new) + nfs_to_nfsd_file_put_local(new); + return nf; +} EXPORT_SYMBOL_GPL(nfs_local_open_fh); static struct bio_vec * @@ -285,10 +336,19 @@ nfs_local_iocb_alloc(struct nfs_pgio_header *hdr, kfree(iocb); return NULL; } - init_sync_kiocb(&iocb->kiocb, file); + + if (localio_O_DIRECT_semantics && + test_bit(NFS_IOHDR_ODIRECT, &hdr->flags)) { + iocb->kiocb.ki_filp = file; + iocb->kiocb.ki_flags = IOCB_DIRECT; + } else + init_sync_kiocb(&iocb->kiocb, file); + iocb->kiocb.ki_pos = hdr->args.offset; iocb->hdr = hdr; iocb->kiocb.ki_flags &= ~IOCB_APPEND; + iocb->aio_complete_work = NULL; + return iocb; } @@ -328,7 +388,7 @@ nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status) hdr->res.op_status = NFS4_OK; hdr->task.tk_status = 0; } else { - hdr->res.op_status = nfs4_stat_to_errno(status); + hdr->res.op_status = nfs_localio_errno_to_nfs4_stat(status); hdr->task.tk_status = status; } } @@ -338,11 +398,23 @@ nfs_local_pgio_release(struct nfs_local_kiocb *iocb) { struct nfs_pgio_header *hdr = iocb->hdr; - nfs_to_nfsd_file_put_local(iocb->localio); + nfs_local_file_put(iocb->localio); nfs_local_iocb_free(iocb); nfs_local_hdr_release(hdr, hdr->task.tk_ops); } +/* + * Complete the I/O from iocb->kiocb.ki_complete() + * + * Note that this function can be called from a bottom half context, + * hence we need to queue the rpc_call_done() etc to a workqueue + */ +static inline void nfs_local_pgio_aio_complete(struct nfs_local_kiocb *iocb) +{ + INIT_WORK(&iocb->work, iocb->aio_complete_work); + queue_work(nfsiod_workqueue, &iocb->work); +} + static void nfs_local_read_done(struct nfs_local_kiocb *iocb, long status) { @@ -365,6 +437,23 @@ nfs_local_read_done(struct nfs_local_kiocb *iocb, long status) status > 0 ? 
status : 0, hdr->res.eof); } +static void nfs_local_read_aio_complete_work(struct work_struct *work) +{ + struct nfs_local_kiocb *iocb = + container_of(work, struct nfs_local_kiocb, work); + + nfs_local_pgio_release(iocb); +} + +static void nfs_local_read_aio_complete(struct kiocb *kiocb, long ret) +{ + struct nfs_local_kiocb *iocb = + container_of(kiocb, struct nfs_local_kiocb, kiocb); + + nfs_local_read_done(iocb, ret); + nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_read_aio_complete_work */ +} + static void nfs_local_call_read(struct work_struct *work) { struct nfs_local_kiocb *iocb = @@ -379,10 +468,10 @@ static void nfs_local_call_read(struct work_struct *work) nfs_local_iter_init(&iter, iocb, READ); status = filp->f_op->read_iter(&iocb->kiocb, &iter); - WARN_ON_ONCE(status == -EIOCBQUEUED); - - nfs_local_read_done(iocb, status); - nfs_local_pgio_release(iocb); + if (status != -EIOCBQUEUED) { + nfs_local_read_done(iocb, status); + nfs_local_pgio_release(iocb); + } revert_creds(save_cred); } @@ -410,6 +499,11 @@ nfs_do_local_read(struct nfs_pgio_header *hdr, nfs_local_pgio_init(hdr, call_ops); hdr->res.eof = false; + if (iocb->kiocb.ki_flags & IOCB_DIRECT) { + iocb->kiocb.ki_complete = nfs_local_read_aio_complete; + iocb->aio_complete_work = nfs_local_read_aio_complete_work; + } + INIT_WORK(&iocb->work, nfs_local_call_read); queue_work(nfslocaliod_workqueue, &iocb->work); @@ -534,6 +628,24 @@ nfs_local_write_done(struct nfs_local_kiocb *iocb, long status) nfs_local_pgio_done(hdr, status); } +static void nfs_local_write_aio_complete_work(struct work_struct *work) +{ + struct nfs_local_kiocb *iocb = + container_of(work, struct nfs_local_kiocb, work); + + nfs_local_vfs_getattr(iocb); + nfs_local_pgio_release(iocb); +} + +static void nfs_local_write_aio_complete(struct kiocb *kiocb, long ret) +{ + struct nfs_local_kiocb *iocb = + container_of(kiocb, struct nfs_local_kiocb, kiocb); + + nfs_local_write_done(iocb, ret); + nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_write_aio_complete_work */ +} + static void nfs_local_call_write(struct work_struct *work) { struct nfs_local_kiocb *iocb = @@ -552,11 +664,11 @@ static void nfs_local_call_write(struct work_struct *work) file_start_write(filp); status = filp->f_op->write_iter(&iocb->kiocb, &iter); file_end_write(filp); - WARN_ON_ONCE(status == -EIOCBQUEUED); - - nfs_local_write_done(iocb, status); - nfs_local_vfs_getattr(iocb); - nfs_local_pgio_release(iocb); + if (status != -EIOCBQUEUED) { + nfs_local_write_done(iocb, status); + nfs_local_vfs_getattr(iocb); + nfs_local_pgio_release(iocb); + } revert_creds(save_cred); current->flags = old_flags; @@ -592,10 +704,16 @@ nfs_do_local_write(struct nfs_pgio_header *hdr, case NFS_FILE_SYNC: iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC; } + nfs_local_pgio_init(hdr, call_ops); nfs_set_local_verifier(hdr->inode, hdr->res.verf, hdr->args.stable); + if (iocb->kiocb.ki_flags & IOCB_DIRECT) { + iocb->kiocb.ki_complete = nfs_local_write_aio_complete; + iocb->aio_complete_work = nfs_local_write_aio_complete_work; + } + INIT_WORK(&iocb->work, nfs_local_call_write); queue_work(nfslocaliod_workqueue, &iocb->work); @@ -626,8 +744,8 @@ int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio, if (status != 0) { if (status == -EAGAIN) - nfs_local_disable(clp); - nfs_to_nfsd_file_put_local(localio); + nfs_localio_disable_client(clp); + nfs_local_file_put(localio); hdr->task.tk_status = status; nfs_local_hdr_release(hdr, call_ops); } @@ -668,7 +786,7 @@ nfs_local_commit_done(struct 
nfs_commit_data *data, int status) data->task.tk_status = 0; } else { nfs_reset_boot_verifier(data->inode); - data->res.op_status = nfs4_stat_to_errno(status); + data->res.op_status = nfs_localio_errno_to_nfs4_stat(status); data->task.tk_status = status; } } @@ -678,7 +796,7 @@ nfs_local_release_commit_data(struct nfsd_file *localio, struct nfs_commit_data *data, const struct rpc_call_ops *call_ops) { - nfs_to_nfsd_file_put_local(localio); + nfs_local_file_put(localio); call_ops->rpc_call_done(&data->task, data); call_ops->rpc_release(data); } diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 1566163c6d85..7359e1a3bd84 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -844,6 +844,41 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, return status; } +#if IS_ENABLED(CONFIG_NFS_LOCALIO) + +static unsigned nfs3_localio_probe_throttle __read_mostly = 0; +module_param(nfs3_localio_probe_throttle, uint, 0644); +MODULE_PARM_DESC(nfs3_localio_probe_throttle, + "Probe for NFSv3 LOCALIO every N IO requests. Must be power-of-2, defaults to 0 (probing disabled)."); + +static void nfs3_localio_probe(struct nfs_server *server) +{ + struct nfs_client *clp = server->nfs_client; + + /* Throttled to reduce nfs_local_probe_async() frequency */ + if (!nfs3_localio_probe_throttle || nfs_server_is_local(clp)) + return; + + /* + * Try (re)enabling LOCALIO if it isn't enabled -- admin deems + * it worthwhile to periodically check if LOCALIO is possible by + * setting the 'nfs3_localio_probe_throttle' module parameter. + * + * This is useful if LOCALIO was previously enabled, but was + * disabled due to server restart, and IO has successfully + * completed in terms of normal RPC. + */ + if ((clp->cl_uuid.nfs3_localio_probe_count++ & + (nfs3_localio_probe_throttle - 1)) == 0) { + if (!nfs_server_is_local(clp)) + nfs_local_probe_async(clp); + } +} + +#else +static void nfs3_localio_probe(struct nfs_server *server) {} +#endif + static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) { struct inode *inode = hdr->inode; @@ -855,8 +890,11 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) if (nfs3_async_handle_jukebox(task, inode)) return -EAGAIN; - if (task->tk_status >= 0 && !server->read_hdrsize) - cmpxchg(&server->read_hdrsize, 0, hdr->res.replen); + if (task->tk_status >= 0) { + if (!server->read_hdrsize) + cmpxchg(&server->read_hdrsize, 0, hdr->res.replen); + nfs3_localio_probe(server); + } nfs_invalidate_atime(inode); nfs_refresh_inode(inode, &hdr->fattr); @@ -886,8 +924,10 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) if (nfs3_async_handle_jukebox(task, inode)) return -EAGAIN; - if (task->tk_status >= 0) + if (task->tk_status >= 0) { nfs_writeback_update_inode(hdr); + nfs3_localio_probe(NFS_SERVER(inode)); + } return 0; } diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 531c9c20ef1d..1924c4a2077b 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -498,15 +498,15 @@ out_put_src_lock: return err; } -struct nfs42_offloadcancel_data { +struct nfs42_offload_data { struct nfs_server *seq_server; struct nfs42_offload_status_args args; struct nfs42_offload_status_res res; }; -static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata) +static void nfs42_offload_prepare(struct rpc_task *task, void *calldata) { - struct nfs42_offloadcancel_data *data = calldata; + struct nfs42_offload_data *data = calldata; nfs4_setup_sequence(data->seq_server->nfs_client,
&data->args.osa_seq_args, @@ -515,7 +515,7 @@ static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata) static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata) { - struct nfs42_offloadcancel_data *data = calldata; + struct nfs42_offload_data *data = calldata; trace_nfs4_offload_cancel(&data->args, task->tk_status); nfs41_sequence_done(task, &data->res.osr_seq_res); @@ -525,22 +525,22 @@ static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata) rpc_restart_call_prepare(task); } -static void nfs42_free_offloadcancel_data(void *data) +static void nfs42_offload_release(void *data) { kfree(data); } static const struct rpc_call_ops nfs42_offload_cancel_ops = { - .rpc_call_prepare = nfs42_offload_cancel_prepare, + .rpc_call_prepare = nfs42_offload_prepare, .rpc_call_done = nfs42_offload_cancel_done, - .rpc_release = nfs42_free_offloadcancel_data, + .rpc_release = nfs42_offload_release, }; static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *stateid) { struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); - struct nfs42_offloadcancel_data *data = NULL; + struct nfs42_offload_data *data = NULL; struct nfs_open_context *ctx = nfs_file_open_context(dst); struct rpc_task *task; struct rpc_message msg = { @@ -552,14 +552,14 @@ static int nfs42_do_offload_cancel_async(struct file *dst, .rpc_message = &msg, .callback_ops = &nfs42_offload_cancel_ops, .workqueue = nfsiod_workqueue, - .flags = RPC_TASK_ASYNC, + .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, }; int status; if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL)) return -EOPNOTSUPP; - data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_KERNEL); + data = kzalloc(sizeof(struct nfs42_offload_data), GFP_KERNEL); if (data == NULL) return -ENOMEM; @@ -861,7 +861,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server, .rpc_message = &msg, .callback_ops = &nfs42_layoutstat_ops, .callback_data = data, - .flags = RPC_TASK_ASYNC, + .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, }; struct rpc_task *task; @@ -1016,7 +1016,7 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg, struct rpc_task_setup task_setup = { .rpc_message = &msg, .callback_ops = &nfs42_layouterror_ops, - .flags = RPC_TASK_ASYNC, + .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, }; unsigned int i; diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 9e3ae53e2205..5072d7ea72e9 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -144,9 +144,11 @@ decode_putfh_maxsz + \ decode_offload_cancel_maxsz) #define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_copy_notify_maxsz) #define NFS4_dec_copy_notify_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_copy_notify_maxsz) #define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \ @@ -549,7 +551,7 @@ static void nfs4_xdr_enc_copy(struct rpc_rqst *req, } /* - * Encode OFFLOAD_CANEL request + * Encode OFFLOAD_CANCEL request */ static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req, struct xdr_stream *xdr, diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 9a9f60a2291b..542cdf71229f 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1955,6 +1955,7 @@ restart: } rcu_read_unlock(); nfs4_free_state_owners(&freeme); + nfs_local_probe_async(clp); if (lost_locks) pr_warn("NFS: %s: lost %d locks\n", clp->cl_hostname, lost_locks); diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 
1eab98c277fa..7a058bd8c566 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -1714,38 +1714,6 @@ TRACE_EVENT(nfs_local_open_fh, ) ); -DECLARE_EVENT_CLASS(nfs_local_client_event, - TP_PROTO( - const struct nfs_client *clp - ), - - TP_ARGS(clp), - - TP_STRUCT__entry( - __field(unsigned int, protocol) - __string(server, clp->cl_hostname) - ), - - TP_fast_assign( - __entry->protocol = clp->rpc_ops->version; - __assign_str(server); - ), - - TP_printk( - "server=%s NFSv%u", __get_str(server), __entry->protocol - ) -); - -#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \ - DEFINE_EVENT(nfs_local_client_event, name, \ - TP_PROTO( \ - const struct nfs_client *clp \ - ), \ - TP_ARGS(clp)) - -DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_enable); -DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_disable); - DECLARE_EVENT_CLASS(nfs_xdr_event, TP_PROTO( const struct xdr_stream *xdr, diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index e27c07bd8929..11968dcb7243 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -961,8 +961,9 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client; struct nfsd_file *localio = - nfs_local_open_fh(clp, hdr->cred, - hdr->args.fh, hdr->args.context->mode); + nfs_local_open_fh(clp, hdr->cred, hdr->args.fh, + &hdr->args.context->nfl, + hdr->args.context->mode); if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion) task_flags = RPC_TASK_MOVEABLE; diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c index bf378ecd5d9f..7b59a40d40c0 100644 --- a/fs/nfs/sysfs.c +++ b/fs/nfs/sysfs.c @@ -280,9 +280,9 @@ void nfs_sysfs_link_rpc_client(struct nfs_server *server, char name[RPC_CLIENT_NAME_SIZE]; int ret; - strcpy(name, clnt->cl_program->name); - strcat(name, uniq ? uniq : ""); - strcat(name, "_client"); + strscpy(name, clnt->cl_program->name, sizeof(name)); + strncat(name, uniq ? 
uniq : "", sizeof(name) - strlen(name) - 1); + strncat(name, "_client", sizeof(name) - strlen(name) - 1); ret = sysfs_create_link_nowarn(&server->kobj, &clnt->cl_sysfs->kobject, name); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 50fa539611f5..aa3d8bea3ec0 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1826,7 +1826,8 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how, task_flags = RPC_TASK_MOVEABLE; localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred, - data->args.fh, data->context->mode); + data->args.fh, &data->context->nfl, + data->context->mode); return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), data->mds_ops, how, RPC_TASK_CRED_NOREF | task_flags, localio); diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile index a5e54809701e..c10ead273ff2 100644 --- a/fs/nfs_common/Makefile +++ b/fs/nfs_common/Makefile @@ -6,8 +6,9 @@ obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o nfs_acl-objs := nfsacl.o +CFLAGS_localio_trace.o += -I$(src) obj-$(CONFIG_NFS_COMMON_LOCALIO_SUPPORT) += nfs_localio.o -nfs_localio-objs := nfslocalio.o +nfs_localio-objs := nfslocalio.o localio_trace.o obj-$(CONFIG_GRACE_PERIOD) += grace.o obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c index 34a115176f97..af09aed09fd2 100644 --- a/fs/nfs_common/common.c +++ b/fs/nfs_common/common.c @@ -15,7 +15,7 @@ static const struct { { NFS_OK, 0 }, { NFSERR_PERM, -EPERM }, { NFSERR_NOENT, -ENOENT }, - { NFSERR_IO, -errno_NFSERR_IO}, + { NFSERR_IO, -EIO }, { NFSERR_NXIO, -ENXIO }, /* { NFSERR_EAGAIN, -EAGAIN }, */ { NFSERR_ACCES, -EACCES }, @@ -45,7 +45,6 @@ static const struct { { NFSERR_SERVERFAULT, -EREMOTEIO }, { NFSERR_BADTYPE, -EBADTYPE }, { NFSERR_JUKEBOX, -EJUKEBOX }, - { -1, -EIO } }; /** @@ -59,26 +58,29 @@ int nfs_stat_to_errno(enum nfs_stat status) { int i; - for (i = 0; nfs_errtbl[i].stat != -1; i++) { + for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) { if (nfs_errtbl[i].stat == (int)status) return nfs_errtbl[i].errno; } - return nfs_errtbl[i].errno; + return -EIO; } EXPORT_SYMBOL_GPL(nfs_stat_to_errno); /* * We need to translate between nfs v4 status return values and * the local errno values which may not be the same. + * + * nfs4_errtbl_common[] is used before more specialized mappings + * available in nfs4_errtbl[] or nfs4_errtbl_localio[]. 
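The conversion helpers here share one scheme: consult the common table first, then a direction-specific table, then fall back to a default. In isolation the scheme looks like this (a self-contained sketch with illustrative names, not the kernel code):

	#include <stddef.h>

	struct stat_errno_map { int stat; int errno_val; };

	static int tbl_lookup(const struct stat_errno_map *tbl, size_t n,
			      int stat, int *out)
	{
		for (size_t i = 0; i < n; i++) {
			if (tbl[i].stat == stat) {
				*out = tbl[i].errno_val;
				return 1;
			}
		}
		return 0;
	}

	/* Common mappings win; specialized mappings fill the gaps. */
	int tiered_stat_to_errno(const struct stat_errno_map *common, size_t ncommon,
				 const struct stat_errno_map *specific, size_t nspecific,
				 int stat, int fallback)
	{
		int err;

		if (tbl_lookup(common, ncommon, stat, &err))
			return err;
		if (tbl_lookup(specific, nspecific, stat, &err))
			return err;
		return fallback;
	}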
*/ static const struct { int stat; int errno; -} nfs4_errtbl[] = { +} nfs4_errtbl_common[] = { { NFS4_OK, 0 }, { NFS4ERR_PERM, -EPERM }, { NFS4ERR_NOENT, -ENOENT }, - { NFS4ERR_IO, -errno_NFSERR_IO}, + { NFS4ERR_IO, -EIO }, { NFS4ERR_NXIO, -ENXIO }, { NFS4ERR_ACCESS, -EACCES }, { NFS4ERR_EXIST, -EEXIST }, @@ -98,15 +100,20 @@ static const struct { { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, { NFS4ERR_NOTSUPP, -ENOTSUPP }, { NFS4ERR_TOOSMALL, -ETOOSMALL }, - { NFS4ERR_SERVERFAULT, -EREMOTEIO }, { NFS4ERR_BADTYPE, -EBADTYPE }, - { NFS4ERR_LOCKED, -EAGAIN }, { NFS4ERR_SYMLINK, -ELOOP }, - { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, { NFS4ERR_DEADLOCK, -EDEADLK }, +}; + +static const struct { + int stat; + int errno; +} nfs4_errtbl[] = { + { NFS4ERR_SERVERFAULT, -EREMOTEIO }, + { NFS4ERR_LOCKED, -EAGAIN }, + { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, { NFS4ERR_NOXATTR, -ENODATA }, { NFS4ERR_XATTR2BIG, -E2BIG }, - { -1, -EIO } }; /* @@ -116,7 +123,14 @@ static const struct { int nfs4_stat_to_errno(int stat) { int i; - for (i = 0; nfs4_errtbl[i].stat != -1; i++) { + + /* First check nfs4_errtbl_common */ + for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) { + if (nfs4_errtbl_common[i].stat == stat) + return nfs4_errtbl_common[i].errno; + } + /* Then check nfs4_errtbl */ + for (i = 0; i < ARRAY_SIZE(nfs4_errtbl); i++) { if (nfs4_errtbl[i].stat == stat) return nfs4_errtbl[i].errno; } @@ -132,3 +146,56 @@ int nfs4_stat_to_errno(int stat) return -stat; } EXPORT_SYMBOL_GPL(nfs4_stat_to_errno); + +/* + * This table is useful for conversion from local errno to NFS error. + * It provides more logically correct mappings for use with LOCALIO + * (which is focused on converting from errno to NFS status). + */ +static const struct { + int stat; + int errno; +} nfs4_errtbl_localio[] = { + /* Map errors differently than nfs4_errtbl */ + { NFS4ERR_IO, -EREMOTEIO }, + { NFS4ERR_DELAY, -EAGAIN }, + { NFS4ERR_FBIG, -E2BIG }, + /* Map errors not handled by nfs4_errtbl */ + { NFS4ERR_STALE, -EBADF }, + { NFS4ERR_STALE, -EOPENSTALE }, + { NFS4ERR_DELAY, -ETIMEDOUT }, + { NFS4ERR_DELAY, -ERESTARTSYS }, + { NFS4ERR_DELAY, -ENOMEM }, + { NFS4ERR_IO, -ETXTBSY }, + { NFS4ERR_IO, -EBUSY }, + { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, + { NFS4ERR_SERVERFAULT, -ENFILE }, + { NFS4ERR_IO, -EUCLEAN }, + { NFS4ERR_PERM, -ENOKEY }, +}; + +/* + * Convert an errno to an NFS error code for LOCALIO. + */ +__u32 nfs_localio_errno_to_nfs4_stat(int errno) +{ + int i; + + /* First check nfs4_errtbl_common */ + for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) { + if (nfs4_errtbl_common[i].errno == errno) + return nfs4_errtbl_common[i].stat; + } + /* Then check nfs4_errtbl_localio */ + for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_localio); i++) { + if (nfs4_errtbl_localio[i].errno == errno) + return nfs4_errtbl_localio[i].stat; + } + /* If we cannot translate the error, the recovery routines should + * handle it. + * Note: remaining NFSv4 error codes have values > 10000, so should + * not conflict with native Linux error codes. 
+ */ + return NFS4ERR_SERVERFAULT; +} +EXPORT_SYMBOL_GPL(nfs_localio_errno_to_nfs4_stat); diff --git a/fs/nfs_common/localio_trace.c b/fs/nfs_common/localio_trace.c new file mode 100644 index 000000000000..7decfe57abeb --- /dev/null +++ b/fs/nfs_common/localio_trace.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024 Trond Myklebust <trond.myklebust@hammerspace.com> + * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com> + */ +#include <linux/nfs_fs.h> +#include <linux/namei.h> + +#define CREATE_TRACE_POINTS +#include "localio_trace.h" diff --git a/fs/nfs_common/localio_trace.h b/fs/nfs_common/localio_trace.h new file mode 100644 index 000000000000..4055aec9ff8d --- /dev/null +++ b/fs/nfs_common/localio_trace.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Trond Myklebust <trond.myklebust@hammerspace.com> + * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com> + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM nfs_localio + +#if !defined(_TRACE_NFS_COMMON_LOCALIO_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NFS_COMMON_LOCALIO_H + +#include <linux/tracepoint.h> + +#include <trace/misc/fs.h> +#include <trace/misc/nfs.h> +#include <trace/misc/sunrpc.h> + +DECLARE_EVENT_CLASS(nfs_local_client_event, + TP_PROTO( + const struct nfs_client *clp + ), + + TP_ARGS(clp), + + TP_STRUCT__entry( + __field(unsigned int, protocol) + __string(server, clp->cl_hostname) + ), + + TP_fast_assign( + __entry->protocol = clp->rpc_ops->version; + __assign_str(server); + ), + + TP_printk( + "server=%s NFSv%u", __get_str(server), __entry->protocol + ) +); + +#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \ + DEFINE_EVENT(nfs_local_client_event, name, \ + TP_PROTO( \ + const struct nfs_client *clp \ + ), \ + TP_ARGS(clp)) + +DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_enable_client); +DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_disable_client); + +#endif /* _TRACE_NFS_COMMON_LOCALIO_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE localio_trace +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/fs/nfs_common/nfslocalio.c b/fs/nfs_common/nfslocalio.c index a74ec08f6c96..6a0bdea6d644 100644 --- a/fs/nfs_common/nfslocalio.c +++ b/fs/nfs_common/nfslocalio.c @@ -7,38 +7,67 @@ #include <linux/module.h> #include <linux/list.h> #include <linux/nfslocalio.h> +#include <linux/nfs3.h> +#include <linux/nfs4.h> +#include <linux/nfs_fs.h> #include <net/netns/generic.h> +#include "localio_trace.h" + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("NFS localio protocol bypass support"); -static DEFINE_SPINLOCK(nfs_uuid_lock); +static DEFINE_SPINLOCK(nfs_uuids_lock); /* * Global list of nfs_uuid_t instances - * that is protected by nfs_uuid_lock. + * that is protected by nfs_uuids_lock. */ static LIST_HEAD(nfs_uuids); +/* + * Lock ordering: + * 1: nfs_uuid->lock + * 2: nfs_uuids_lock + * 3: nfs_uuid->list_lock (aka nn->local_clients_lock) + * + * May skip locks in select cases, but never hold multiple + * locks out of order. + */ + void nfs_uuid_init(nfs_uuid_t *nfs_uuid) { - nfs_uuid->net = NULL; + RCU_INIT_POINTER(nfs_uuid->net, NULL); nfs_uuid->dom = NULL; + nfs_uuid->list_lock = NULL; INIT_LIST_HEAD(&nfs_uuid->list); + INIT_LIST_HEAD(&nfs_uuid->files); + spin_lock_init(&nfs_uuid->lock); + nfs_uuid->nfs3_localio_probe_count = 0; } EXPORT_SYMBOL_GPL(nfs_uuid_init); bool nfs_uuid_begin(nfs_uuid_t *nfs_uuid) { - spin_lock(&nfs_uuid_lock); - /* Is this nfs_uuid already in use? 
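The lock-ordering rule documented above (nfs_uuid->lock, then nfs_uuids_lock, then the per-net list_lock) can be made concrete with a sketch: nfs_uuid_begin() here takes 1 then 2, nfs_uuid_put() takes 1 then 3, and any path needing more than one lock must nest outermost-first. The function below is illustrative only; no function in the patch actually takes all three at once:

	static void uuid_locks_in_order(nfs_uuid_t *nfs_uuid)
	{
		spin_lock(&nfs_uuid->lock);	/* 1: per-client state */
		spin_lock(&nfs_uuids_lock);	/* 2: global nfs_uuids list */
		/* 3: per-net local_clients lock, set once registered */
		if (nfs_uuid->list_lock)
			spin_lock(nfs_uuid->list_lock);
		/* ... touch state guarded by the held locks ... */
		if (nfs_uuid->list_lock)
			spin_unlock(nfs_uuid->list_lock);
		spin_unlock(&nfs_uuids_lock);
		spin_unlock(&nfs_uuid->lock);
	}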
*/ + spin_lock(&nfs_uuid->lock); + if (rcu_access_pointer(nfs_uuid->net)) { + /* This nfs_uuid is already in use */ + spin_unlock(&nfs_uuid->lock); + return false; + } + + spin_lock(&nfs_uuids_lock); if (!list_empty(&nfs_uuid->list)) { - spin_unlock(&nfs_uuid_lock); + /* This nfs_uuid is already in use */ + spin_unlock(&nfs_uuids_lock); + spin_unlock(&nfs_uuid->lock); return false; } - uuid_gen(&nfs_uuid->uuid); list_add_tail(&nfs_uuid->list, &nfs_uuids); - spin_unlock(&nfs_uuid_lock); + spin_unlock(&nfs_uuids_lock); + + uuid_gen(&nfs_uuid->uuid); + spin_unlock(&nfs_uuid->lock); return true; } @@ -46,12 +75,16 @@ EXPORT_SYMBOL_GPL(nfs_uuid_begin); void nfs_uuid_end(nfs_uuid_t *nfs_uuid) { - if (nfs_uuid->net == NULL) { - spin_lock(&nfs_uuid_lock); - if (nfs_uuid->net == NULL) + if (!rcu_access_pointer(nfs_uuid->net)) { + spin_lock(&nfs_uuid->lock); + if (!rcu_access_pointer(nfs_uuid->net)) { + /* Not local, remove from nfs_uuids */ + spin_lock(&nfs_uuids_lock); list_del_init(&nfs_uuid->list); - spin_unlock(&nfs_uuid_lock); - } + spin_unlock(&nfs_uuids_lock); + } + spin_unlock(&nfs_uuid->lock); + } } EXPORT_SYMBOL_GPL(nfs_uuid_end); @@ -69,68 +102,142 @@ static nfs_uuid_t * nfs_uuid_lookup_locked(const uuid_t *uuid) static struct module *nfsd_mod; void nfs_uuid_is_local(const uuid_t *uuid, struct list_head *list, - struct net *net, struct auth_domain *dom, - struct module *mod) + spinlock_t *list_lock, struct net *net, + struct auth_domain *dom, struct module *mod) { nfs_uuid_t *nfs_uuid; - spin_lock(&nfs_uuid_lock); + spin_lock(&nfs_uuids_lock); nfs_uuid = nfs_uuid_lookup_locked(uuid); - if (nfs_uuid) { - kref_get(&dom->ref); - nfs_uuid->dom = dom; - /* - * We don't hold a ref on the net, but instead put - * ourselves on a list so the net pointer can be - * invalidated. - */ - list_move(&nfs_uuid->list, list); - rcu_assign_pointer(nfs_uuid->net, net); - - __module_get(mod); - nfsd_mod = mod; + if (!nfs_uuid) { + spin_unlock(&nfs_uuids_lock); + return; } - spin_unlock(&nfs_uuid_lock); + + /* + * We don't hold a ref on the net, but instead put + * ourselves on @list (nn->local_clients) so the net + * pointer can be invalidated. + */ + spin_lock(list_lock); /* list_lock is nn->local_clients_lock */ + list_move(&nfs_uuid->list, list); + spin_unlock(list_lock); + + spin_unlock(&nfs_uuids_lock); + /* Once nfs_uuid is parented to @list, avoid global nfs_uuids_lock */ + spin_lock(&nfs_uuid->lock); + + __module_get(mod); + nfsd_mod = mod; + + nfs_uuid->list_lock = list_lock; + kref_get(&dom->ref); + nfs_uuid->dom = dom; + rcu_assign_pointer(nfs_uuid->net, net); + spin_unlock(&nfs_uuid->lock); } EXPORT_SYMBOL_GPL(nfs_uuid_is_local); -static void nfs_uuid_put_locked(nfs_uuid_t *nfs_uuid) +void nfs_localio_enable_client(struct nfs_client *clp) +{ + /* nfs_uuid_is_local() does the actual enablement */ + trace_nfs_localio_enable_client(clp); +} +EXPORT_SYMBOL_GPL(nfs_localio_enable_client); + +/* + * Cleanup the nfs_uuid_t embedded in an nfs_client. + * This is the long-form of nfs_uuid_init(). 
+ */ +static bool nfs_uuid_put(nfs_uuid_t *nfs_uuid) { - if (nfs_uuid->net) { - module_put(nfsd_mod); - nfs_uuid->net = NULL; + LIST_HEAD(local_files); + struct nfs_file_localio *nfl, *tmp; + + spin_lock(&nfs_uuid->lock); + if (unlikely(!rcu_access_pointer(nfs_uuid->net))) { + spin_unlock(&nfs_uuid->lock); + return false; } + RCU_INIT_POINTER(nfs_uuid->net, NULL); + if (nfs_uuid->dom) { auth_domain_put(nfs_uuid->dom); nfs_uuid->dom = NULL; } - list_del_init(&nfs_uuid->list); + + list_splice_init(&nfs_uuid->files, &local_files); + spin_unlock(&nfs_uuid->lock); + + /* Walk list of files and ensure their last references dropped */ + list_for_each_entry_safe(nfl, tmp, &local_files, list) { + nfs_close_local_fh(nfl); + cond_resched(); + } + + spin_lock(&nfs_uuid->lock); + BUG_ON(!list_empty(&nfs_uuid->files)); + + /* Remove client from nn->local_clients */ + if (nfs_uuid->list_lock) { + spin_lock(nfs_uuid->list_lock); + BUG_ON(list_empty(&nfs_uuid->list)); + list_del_init(&nfs_uuid->list); + spin_unlock(nfs_uuid->list_lock); + nfs_uuid->list_lock = NULL; + } + + module_put(nfsd_mod); + spin_unlock(&nfs_uuid->lock); + + return true; } -void nfs_uuid_invalidate_clients(struct list_head *list) +void nfs_localio_disable_client(struct nfs_client *clp) { + if (nfs_uuid_put(&clp->cl_uuid)) + trace_nfs_localio_disable_client(clp); +} +EXPORT_SYMBOL_GPL(nfs_localio_disable_client); + +void nfs_localio_invalidate_clients(struct list_head *nn_local_clients, + spinlock_t *nn_local_clients_lock) +{ + LIST_HEAD(local_clients); nfs_uuid_t *nfs_uuid, *tmp; + struct nfs_client *clp; - spin_lock(&nfs_uuid_lock); - list_for_each_entry_safe(nfs_uuid, tmp, list, list) - nfs_uuid_put_locked(nfs_uuid); - spin_unlock(&nfs_uuid_lock); + spin_lock(nn_local_clients_lock); + list_splice_init(nn_local_clients, &local_clients); + spin_unlock(nn_local_clients_lock); + list_for_each_entry_safe(nfs_uuid, tmp, &local_clients, list) { + if (WARN_ON(nfs_uuid->list_lock != nn_local_clients_lock)) + break; + clp = container_of(nfs_uuid, struct nfs_client, cl_uuid); + nfs_localio_disable_client(clp); + } } -EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_clients); +EXPORT_SYMBOL_GPL(nfs_localio_invalidate_clients); -void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid) +static void nfs_uuid_add_file(nfs_uuid_t *nfs_uuid, struct nfs_file_localio *nfl) { - if (nfs_uuid->net) { - spin_lock(&nfs_uuid_lock); - nfs_uuid_put_locked(nfs_uuid); - spin_unlock(&nfs_uuid_lock); + /* Add nfl to nfs_uuid->files if it isn't already */ + spin_lock(&nfs_uuid->lock); + if (list_empty(&nfl->list)) { + rcu_assign_pointer(nfl->nfs_uuid, nfs_uuid); + list_add_tail(&nfl->list, &nfs_uuid->files); } + spin_unlock(&nfs_uuid->lock); } -EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_one_client); +/* + * Caller is responsible for calling nfsd_net_put and + * nfsd_file_put (via nfs_to_nfsd_file_put_local). + */ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid, struct rpc_clnt *rpc_clnt, const struct cred *cred, - const struct nfs_fh *nfs_fh, const fmode_t fmode) + const struct nfs_fh *nfs_fh, struct nfs_file_localio *nfl, + const fmode_t fmode) { struct net *net; struct nfsd_file *localio; @@ -139,7 +246,7 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid, * Not running in nfsd context, so must safely get reference on nfsd_serv. * But the server may already be shutting down, if so disallow new localio. 
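The comment spanning this function describes a shutdown-safe pin: dereference the RCU-protected net pointer and take the counted reference inside the same RCU read-side section, so the net cannot be freed between the two steps. Reduced to its skeleton (a sketch; pin_localio_net() is not a helper in the patch, and a caller would drop the pin with nfs_to_nfsd_net_put()):

	static struct net *pin_localio_net(nfs_uuid_t *uuid)
	{
		struct net *net;

		rcu_read_lock();
		net = rcu_dereference(uuid->net);
		if (!net || !nfs_to->nfsd_net_try_get(net)) {
			/* torn down or shutting down: refuse new localio */
			rcu_read_unlock();
			return NULL;
		}
		rcu_read_unlock();
		return net;	/* tryget succeeded: net is now pinned */
	}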
* uuid->net is NOT a counted reference, but rcu_read_lock() ensures that - * if uuid->net is not NULL, then calling nfsd_serv_try_get() is safe + * if uuid->net is not NULL, then calling nfsd_net_try_get() is safe * and if it succeeds we will have an implied reference to the net. * * Otherwise NFS may not have ref on NFSD and therefore cannot safely @@ -147,21 +254,62 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid, */ rcu_read_lock(); net = rcu_dereference(uuid->net); - if (!net || !nfs_to->nfsd_serv_try_get(net)) { + if (!net || !nfs_to->nfsd_net_try_get(net)) { rcu_read_unlock(); return ERR_PTR(-ENXIO); } rcu_read_unlock(); - /* We have an implied reference to net thanks to nfsd_serv_try_get */ + /* We have an implied reference to net thanks to nfsd_net_try_get */ localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt, cred, nfs_fh, fmode); if (IS_ERR(localio)) nfs_to_nfsd_net_put(net); + else + nfs_uuid_add_file(uuid, nfl); return localio; } EXPORT_SYMBOL_GPL(nfs_open_local_fh); +void nfs_close_local_fh(struct nfs_file_localio *nfl) +{ + struct nfsd_file *ro_nf = NULL; + struct nfsd_file *rw_nf = NULL; + nfs_uuid_t *nfs_uuid; + + rcu_read_lock(); + nfs_uuid = rcu_dereference(nfl->nfs_uuid); + if (!nfs_uuid) { + /* regular (non-LOCALIO) NFS will hammer this */ + rcu_read_unlock(); + return; + } + + ro_nf = rcu_access_pointer(nfl->ro_file); + rw_nf = rcu_access_pointer(nfl->rw_file); + if (ro_nf || rw_nf) { + spin_lock(&nfs_uuid->lock); + if (ro_nf) + ro_nf = rcu_dereference_protected(xchg(&nfl->ro_file, NULL), 1); + if (rw_nf) + rw_nf = rcu_dereference_protected(xchg(&nfl->rw_file, NULL), 1); + + /* Remove nfl from nfs_uuid->files list */ + RCU_INIT_POINTER(nfl->nfs_uuid, NULL); + list_del_init(&nfl->list); + spin_unlock(&nfs_uuid->lock); + rcu_read_unlock(); + + if (ro_nf) + nfs_to_nfsd_file_put_local(ro_nf); + if (rw_nf) + nfs_to_nfsd_file_put_local(rw_nf); + return; + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(nfs_close_local_fh); + /* * The NFS LOCALIO code needs to call into NFSD using various symbols, * but cannot be statically linked, because that will make the NFS diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index dc5c9d8e8202..0e552d873eaa 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -39,6 +39,7 @@ #include <linux/fsnotify.h> #include <linux/seq_file.h> #include <linux/rhashtable.h> +#include <linux/nfslocalio.h> #include "vfs.h" #include "nfsd.h" @@ -391,7 +392,7 @@ nfsd_file_put(struct nfsd_file *nf) } /** - * nfsd_file_put_local - put nfsd_file reference and arm nfsd_serv_put in caller + * nfsd_file_put_local - put nfsd_file reference and arm nfsd_net_put in caller * @nf: nfsd_file of which to put the reference * * First save the associated net to return to caller, then put @@ -833,6 +834,14 @@ __nfsd_file_cache_purge(struct net *net) struct nfsd_file *nf; LIST_HEAD(dispose); +#if IS_ENABLED(CONFIG_NFS_LOCALIO) + if (net) { + struct nfsd_net *nn = net_generic(net, nfsd_net_id); + nfs_localio_invalidate_clients(&nn->local_clients, + &nn->local_clients_lock); + } +#endif + rhltable_walk_enter(&nfsd_file_rhltable, &iter); do { rhashtable_walk_start(&iter); @@ -1222,10 +1231,9 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp, * a file. The security implications of this should be carefully * considered before use. * - * The nfsd_file object returned by this API is reference-counted - * and garbage-collected. 
The object is retained for a few - * seconds after the final nfsd_file_put() in case the caller - * wants to re-use it. + * The nfsd_file object returned by this API is reference-counted + * but not garbage-collected. The object is unhashed after the + * final nfsd_file_put(). * * Return values: * %nfs_ok - @pnf points to an nfsd_file with its reference @@ -1247,7 +1255,7 @@ nfsd_file_acquire_local(struct net *net, struct svc_cred *cred, __be32 beres; beres = nfsd_file_do_acquire(NULL, net, cred, client, - fhp, may_flags, NULL, pnf, true); + fhp, may_flags, NULL, pnf, false); put_cred(revert_creds(save_cred)); return beres; } diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c index f441cb9f74d5..238647fa379e 100644 --- a/fs/nfsd/localio.c +++ b/fs/nfsd/localio.c @@ -25,10 +25,12 @@ #include "cache.h" static const struct nfsd_localio_operations nfsd_localio_ops = { - .nfsd_serv_try_get = nfsd_serv_try_get, - .nfsd_serv_put = nfsd_serv_put, + .nfsd_net_try_get = nfsd_net_try_get, + .nfsd_net_put = nfsd_net_put, .nfsd_open_local_fh = nfsd_open_local_fh, .nfsd_file_put_local = nfsd_file_put_local, + .nfsd_file_get = nfsd_file_get, + .nfsd_file_put = nfsd_file_put, .nfsd_file_file = nfsd_file_file, }; @@ -52,7 +54,7 @@ void nfsd_localio_ops_init(void) * avoid all the NFS overhead with reads, writes and commits. * * On successful return, returned nfsd_file will have its nf_net member - * set. Caller (NFS client) is responsible for calling nfsd_serv_put and + * set. Caller (NFS client) is responsible for calling nfsd_net_put and * nfsd_file_put (via nfs_to_nfsd_file_put_local). */ struct nfsd_file * @@ -114,6 +116,7 @@ static __be32 localio_proc_uuid_is_local(struct svc_rqst *rqstp) struct nfsd_net *nn = net_generic(net, nfsd_net_id); nfs_uuid_is_local(&argp->uuid, &nn->local_clients, + &nn->local_clients_lock, net, rqstp->rq_client, THIS_MODULE); return rpc_success; diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 4a07b8d0837b..3e2d0fde80a7 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -134,9 +134,10 @@ struct nfsd_net { struct svc_info nfsd_info; #define nfsd_serv nfsd_info.serv - struct percpu_ref nfsd_serv_ref; - struct completion nfsd_serv_confirm_done; - struct completion nfsd_serv_free_done; + + struct percpu_ref nfsd_net_ref; + struct completion nfsd_net_confirm_done; + struct completion nfsd_net_free_done; /* * clientid and stateid data for construction of net unique COPY @@ -213,6 +214,7 @@ struct nfsd_net { #if IS_ENABLED(CONFIG_NFS_LOCALIO) /* Local clients to be invalidated when net is shut down */ + spinlock_t local_clients_lock; struct list_head local_clients; #endif }; @@ -223,8 +225,8 @@ struct nfsd_net { extern bool nfsd_support_version(int vers); extern unsigned int nfsd_net_id; -bool nfsd_serv_try_get(struct net *net); -void nfsd_serv_put(struct net *net); +bool nfsd_net_try_get(struct net *net); +void nfsd_net_put(struct net *net); void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn); void nfsd_reset_write_verifier(struct nfsd_net *nn); diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 95ea4393305b..ce2a71e4904c 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -2217,6 +2217,7 @@ static __net_init int nfsd_net_init(struct net *net) seqlock_init(&nn->writeverf_lock); nfsd_proc_stat_init(net); #if IS_ENABLED(CONFIG_NFS_LOCALIO) + spin_lock_init(&nn->local_clients_lock); INIT_LIST_HEAD(&nn->local_clients); #endif return 0; @@ -2234,14 +2235,15 @@ out_export_error: * nfsd_net_pre_exit - Disconnect localio clients from net namespace * @net: a
network namespace that is about to be destroyed * - * This invalidated ->net pointers held by localio clients + * This invalidates ->net pointers held by localio clients * while they can still safely access nn->counter. */ static __net_exit void nfsd_net_pre_exit(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - nfs_uuid_invalidate_clients(&nn->local_clients); + nfs_localio_invalidate_clients(&nn->local_clients, + &nn->local_clients_lock); } #endif diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 3f5104ed70bf..9b3d6cff0e1e 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -204,32 +204,32 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change return 0; } -bool nfsd_serv_try_get(struct net *net) __must_hold(rcu) +bool nfsd_net_try_get(struct net *net) __must_hold(rcu) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - return (nn && percpu_ref_tryget_live(&nn->nfsd_serv_ref)); + return (nn && percpu_ref_tryget_live(&nn->nfsd_net_ref)); } -void nfsd_serv_put(struct net *net) __must_hold(rcu) +void nfsd_net_put(struct net *net) __must_hold(rcu) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - percpu_ref_put(&nn->nfsd_serv_ref); + percpu_ref_put(&nn->nfsd_net_ref); } -static void nfsd_serv_done(struct percpu_ref *ref) +static void nfsd_net_done(struct percpu_ref *ref) { - struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref); + struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref); - complete(&nn->nfsd_serv_confirm_done); + complete(&nn->nfsd_net_confirm_done); } -static void nfsd_serv_free(struct percpu_ref *ref) +static void nfsd_net_free(struct percpu_ref *ref) { - struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref); + struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref); - complete(&nn->nfsd_serv_free_done); + complete(&nn->nfsd_net_free_done); } /* @@ -426,6 +426,10 @@ static void nfsd_shutdown_net(struct net *net) if (!nn->nfsd_net_up) return; + + percpu_ref_kill_and_confirm(&nn->nfsd_net_ref, nfsd_net_done); + wait_for_completion(&nn->nfsd_net_confirm_done); + nfsd_export_flush(net); nfs4_state_shutdown_net(net); nfsd_reply_cache_shutdown(nn); @@ -434,7 +438,10 @@ static void nfsd_shutdown_net(struct net *net) lockd_down(net); nn->lockd_up = false; } - percpu_ref_exit(&nn->nfsd_serv_ref); + + wait_for_completion(&nn->nfsd_net_free_done); + percpu_ref_exit(&nn->nfsd_net_ref); + nn->nfsd_net_up = false; nfsd_shutdown_generic(); } @@ -516,11 +523,6 @@ void nfsd_destroy_serv(struct net *net) lockdep_assert_held(&nfsd_mutex); - percpu_ref_kill_and_confirm(&nn->nfsd_serv_ref, nfsd_serv_done); - wait_for_completion(&nn->nfsd_serv_confirm_done); - wait_for_completion(&nn->nfsd_serv_free_done); - /* percpu_ref_exit is called in nfsd_shutdown_net */ - spin_lock(&nfsd_notifier_lock); nn->nfsd_serv = NULL; spin_unlock(&nfsd_notifier_lock); @@ -621,12 +623,12 @@ int nfsd_create_serv(struct net *net) if (nn->nfsd_serv) return 0; - error = percpu_ref_init(&nn->nfsd_serv_ref, nfsd_serv_free, + error = percpu_ref_init(&nn->nfsd_net_ref, nfsd_net_free, 0, GFP_KERNEL); if (error) return error; - init_completion(&nn->nfsd_serv_free_done); - init_completion(&nn->nfsd_serv_confirm_done); + init_completion(&nn->nfsd_net_free_done); + init_completion(&nn->nfsd_net_confirm_done); if (nfsd_max_blksize == 0) nfsd_max_blksize = nfsd_get_default_max_blksize(); diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index 1b508f543384..9729f071c5aa 
100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c @@ -206,8 +206,8 @@ static void orangefs_kernel_debug_init(void) pr_info("%s: overflow 1!\n", __func__); } - debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer, - &kernel_debug_fops); + debugfs_create_file_aux_num(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer, + 0, &kernel_debug_fops); } @@ -306,11 +306,10 @@ static void orangefs_client_debug_init(void) pr_info("%s: overflow! 2\n", __func__); } - client_debug_dentry = debugfs_create_file(ORANGEFS_CLIENT_DEBUG_FILE, - 0444, - debug_dir, - c_buffer, - &kernel_debug_fops); + client_debug_dentry = debugfs_create_file_aux_num( + ORANGEFS_CLIENT_DEBUG_FILE, + 0444, debug_dir, c_buffer, 1, + &kernel_debug_fops); } /* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/ @@ -418,8 +417,7 @@ static ssize_t orangefs_debug_write(struct file *file, * A service operation is required to set a new client-side * debug mask. */ - if (!strcmp(file->f_path.dentry->d_name.name, - ORANGEFS_KMOD_DEBUG_FILE)) { + if (!debugfs_get_aux_num(file)) { // kernel-debug debug_string_to_mask(buf, &orangefs_gossip_debug_mask, 0); debug_mask_to_string(&orangefs_gossip_debug_mask, 0); debug_string = kernel_debug_string; diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 785408861c01..6931308876c4 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -817,7 +817,7 @@ EXPORT_SYMBOL_GPL(sysfs_emit_at); * Returns number of bytes written to @buf. */ ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { memcpy(buf, attr->private + off, count); diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index d076ebd19a61..78b24b090488 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -763,6 +763,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status *event_status)) ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_disable_all_gpes(void)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_enable_all_wakeup_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index cbbc9a6dc571..ce6521ad04d1 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -22,6 +22,12 @@ #define CNTHCTL_EVNTDIR (1 << 3) #define CNTHCTL_EVNTI (0xF << 4) #define CNTHCTL_ECV (1 << 12) +#define CNTHCTL_EL1TVT (1 << 13) +#define CNTHCTL_EL1TVCT (1 << 14) +#define CNTHCTL_EL1NVPCT (1 << 15) +#define CNTHCTL_EL1NVVCT (1 << 16) +#define CNTHCTL_CNTVMASK (1 << 18) +#define CNTHCTL_CNTPMASK (1 << 19) enum arch_timer_reg { ARCH_TIMER_REG_CTRL, diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index fd650a8789b9..681cf0c8b9df 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -98,6 +98,7 @@ int __init kvm_timer_hyp_init(bool has_gic); int kvm_timer_enable(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_timer_sync_nested(struct kvm_vcpu *vcpu); void kvm_timer_sync_user(struct kvm_vcpu *vcpu); bool 
kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); void kvm_timer_update_run(struct kvm_vcpu *vcpu); @@ -150,9 +151,31 @@ void kvm_timer_cpu_down(void); /* CNTKCTL_EL1 valid bits as of DDI0487J.a */ #define CNTKCTL_VALID_BITS (BIT(17) | GENMASK_ULL(9, 0)) +DECLARE_STATIC_KEY_FALSE(broken_cntvoff_key); + +static inline bool has_broken_cntvoff(void) +{ + return static_branch_unlikely(&broken_cntvoff_key); +} + static inline bool has_cntpoff(void) { return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF)); } +static inline u64 timer_get_offset(struct arch_timer_context *ctxt) +{ + u64 offset = 0; + + if (!ctxt) + return 0; + + if (ctxt->offset.vm_offset) + offset += *ctxt->offset.vm_offset; + if (ctxt->offset.vcpu_offset) + offset += *ctxt->offset.vcpu_offset; + + return offset; +} + #endif diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 59444b495d49..fa2568b4380d 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -67,21 +67,23 @@ static const struct file_operations __fops = { \ typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); -#if defined(CONFIG_DEBUG_FS) - -struct dentry *debugfs_lookup(const char *name, struct dentry *parent); - struct debugfs_short_fops { ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); loff_t (*llseek) (struct file *, loff_t, int); }; +#if defined(CONFIG_DEBUG_FS) + +struct dentry *debugfs_lookup(const char *name, struct dentry *parent); + struct dentry *debugfs_create_file_full(const char *name, umode_t mode, struct dentry *parent, void *data, + const void *aux, const struct file_operations *fops); struct dentry *debugfs_create_file_short(const char *name, umode_t mode, struct dentry *parent, void *data, + const void *aux, const struct debugfs_short_fops *fops); /** @@ -126,7 +128,15 @@ struct dentry *debugfs_create_file_short(const char *name, umode_t mode, const struct debugfs_short_fops *: debugfs_create_file_short, \ struct file_operations *: debugfs_create_file_full, \ struct debugfs_short_fops *: debugfs_create_file_short) \ - (name, mode, parent, data, fops) + (name, mode, parent, data, NULL, fops) + +#define debugfs_create_file_aux(name, mode, parent, data, aux, fops) \ + _Generic(fops, \ + const struct file_operations *: debugfs_create_file_full, \ + const struct debugfs_short_fops *: debugfs_create_file_short, \ + struct file_operations *: debugfs_create_file_full, \ + struct debugfs_short_fops *: debugfs_create_file_short) \ + (name, mode, parent, data, aux, fops) struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, struct dentry *parent, void *data, @@ -153,6 +163,7 @@ void debugfs_remove(struct dentry *dentry); void debugfs_lookup_and_remove(const char *name, struct dentry *parent); const struct file_operations *debugfs_real_fops(const struct file *filp); +const void *debugfs_get_aux(const struct file *file); int debugfs_file_get(struct dentry *dentry); void debugfs_file_put(struct dentry *dentry); @@ -164,8 +175,7 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf, ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf, size_t len, loff_t *ppos); -struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, - struct dentry *new_dir, const char *new_name); +int debugfs_change_name(struct dentry *dentry, const char *fmt, ...) 
__printf(2, 3); void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent, u8 *value); @@ -259,6 +269,14 @@ static inline struct dentry *debugfs_lookup(const char *name, return ERR_PTR(-ENODEV); } +static inline struct dentry *debugfs_create_file_aux(const char *name, + umode_t mode, struct dentry *parent, + void *data, void *aux, + const void *fops) +{ + return ERR_PTR(-ENODEV); +} + static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const void *fops) @@ -312,6 +330,7 @@ static inline void debugfs_lookup_and_remove(const char *name, { } const struct file_operations *debugfs_real_fops(const struct file *filp); +void *debugfs_get_aux(const struct file *file); static inline int debugfs_file_get(struct dentry *dentry) { @@ -341,10 +360,10 @@ static inline ssize_t debugfs_attr_write_signed(struct file *file, return -ENODEV; } -static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, - struct dentry *new_dir, char *new_name) +static inline int __printf(2, 3) debugfs_change_name(struct dentry *dentry, + const char *fmt, ...) { - return ERR_PTR(-ENODEV); + return -ENODEV; } static inline void debugfs_create_u8(const char *name, umode_t mode, @@ -452,6 +471,11 @@ static inline ssize_t debugfs_read_file_str(struct file *file, #endif +#define debugfs_create_file_aux_num(name, mode, parent, data, n, fops) \ + debugfs_create_file_aux(name, mode, parent, data, \ + (void *)(unsigned long)n, fops) +#define debugfs_get_aux_num(f) (unsigned long)debugfs_get_aux(f) + /** * debugfs_create_xul - create a debugfs file that is used to read and write an * unsigned long value, formatted in hexadecimal diff --git a/include/linux/device.h b/include/linux/device.h index 667cb6db9019..80a5b3268986 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -399,7 +399,23 @@ void __iomem *devm_of_iomap(struct device *dev, #endif /* allows to add/remove a custom action to devres stack */ -void devm_remove_action(struct device *dev, void (*action)(void *), void *data); +int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data); + +/** + * devm_remove_action() - removes previously added custom action + * @dev: Device that owns the action + * @action: Function implementing the action + * @data: Pointer to data passed to @action implementation + * + * Removes instance of @action previously added by devm_add_action(). + * Both action and data should match one of the existing entries. 
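devm_remove_action() keeps the old warn-on-failure behavior as a wrapper, while devm_remove_action_nowarn() lets callers treat a missing entry as a normal outcome. A hedged usage sketch (my_teardown and its context are hypothetical):

	static void my_teardown(void *ctx)
	{
		/* hypothetical cleanup for whatever ctx refers to */
	}

	static void my_early_cleanup(struct device *dev, void *ctx)
	{
		/* Run the cleanup now if it is still registered */
		if (devm_remove_action_nowarn(dev, my_teardown, ctx) == 0)
			my_teardown(ctx);
		/* nonzero return: never registered (or already run) */
	}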
+ */ +static inline +void devm_remove_action(struct device *dev, void (*action)(void *), void *data) +{ + WARN_ON(devm_remove_action_nowarn(dev, action, data)); +} + void devm_release_action(struct device *dev, void (*action)(void *), void *data); int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name); @@ -1074,18 +1090,44 @@ void device_del(struct device *dev); DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T)) -int device_for_each_child(struct device *dev, void *data, - int (*fn)(struct device *dev, void *data)); -int device_for_each_child_reverse(struct device *dev, void *data, - int (*fn)(struct device *dev, void *data)); +int device_for_each_child(struct device *parent, void *data, + device_iter_t fn); +int device_for_each_child_reverse(struct device *parent, void *data, + device_iter_t fn); int device_for_each_child_reverse_from(struct device *parent, - struct device *from, const void *data, - int (*fn)(struct device *, const void *)); -struct device *device_find_child(struct device *dev, void *data, - int (*match)(struct device *dev, void *data)); -struct device *device_find_child_by_name(struct device *parent, - const char *name); -struct device *device_find_any_child(struct device *parent); + struct device *from, void *data, + device_iter_t fn); +struct device *device_find_child(struct device *parent, const void *data, + device_match_t match); +/** + * device_find_child_by_name - device iterator for locating a child device. + * @parent: parent struct device + * @name: name of the child device + * + * This is similar to the device_find_child() function above, but it + * returns a reference to a device that has the name @name. + * + * NOTE: you will need to drop the reference with put_device() after use. + */ +static inline struct device *device_find_child_by_name(struct device *parent, + const char *name) +{ + return device_find_child(parent, name, device_match_name); +} + +/** + * device_find_any_child - device iterator for locating a child device, if any. + * @parent: parent struct device + * + * This is similar to the device_find_child() function above, but it + * returns a reference to a child device, if any. + * + * NOTE: you will need to drop the reference with put_device() after use. 
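+ * + * Sketch of the lookup/put pattern (hypothetical caller code, assuming a valid @parent): + * + * struct device *child = device_find_any_child(parent); + * + * if (child) { + * dev_dbg(child, "found a child device\n"); + * put_device(child); + * }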
+ */ +static inline struct device *device_find_any_child(struct device *parent) +{ + return device_find_child(parent, NULL, device_match_any); +} int device_rename(struct device *dev, const char *new_name); int device_move(struct device *dev, struct device *new_parent, diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index b18658bce2c3..f5a56efd2bd6 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -134,6 +134,7 @@ typedef int (*device_match_t)(struct device *dev, const void *data); /* Generic device matching functions that all busses can use to match with */ int device_match_name(struct device *dev, const void *name); +int device_match_type(struct device *dev, const void *type); int device_match_of_node(struct device *dev, const void *np); int device_match_fwnode(struct device *dev, const void *fwnode); int device_match_devt(struct device *dev, const void *pdevt); @@ -141,9 +142,12 @@ int device_match_acpi_dev(struct device *dev, const void *adev); int device_match_acpi_handle(struct device *dev, const void *handle); int device_match_any(struct device *dev, const void *unused); +/* Device iterating function type for various driver core for_each APIs */ +typedef int (*device_iter_t)(struct device *dev, void *data); + /* iterator helpers for buses */ -int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data, - int (*fn)(struct device *dev, void *data)); +int bus_for_each_dev(const struct bus_type *bus, struct device *start, + void *data, device_iter_t fn); struct device *bus_find_device(const struct bus_type *bus, struct device *start, const void *data, device_match_t match); /** diff --git a/include/linux/device/class.h b/include/linux/device/class.h index 518c9c83d64b..45ee3a634999 100644 --- a/include/linux/device/class.h +++ b/include/linux/device/class.h @@ -82,18 +82,16 @@ bool class_is_registered(const struct class *class); struct class_compat; struct class_compat *class_compat_register(const char *name); void class_compat_unregister(struct class_compat *cls); -int class_compat_create_link(struct class_compat *cls, struct device *dev, - struct device *device_link); -void class_compat_remove_link(struct class_compat *cls, struct device *dev, - struct device *device_link); +int class_compat_create_link(struct class_compat *cls, struct device *dev); +void class_compat_remove_link(struct class_compat *cls, struct device *dev); void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class, const struct device *start, const struct device_type *type); struct device *class_dev_iter_next(struct class_dev_iter *iter); void class_dev_iter_exit(struct class_dev_iter *iter); -int class_for_each_device(const struct class *class, const struct device *start, void *data, - int (*fn)(struct device *dev, void *data)); +int class_for_each_device(const struct class *class, const struct device *start, + void *data, device_iter_t fn); struct device *class_find_device(const struct class *class, const struct device *start, const void *data, device_match_t match); diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h index 5c04b8e3833b..cd8e0f0a634b 100644 --- a/include/linux/device/driver.h +++ b/include/linux/device/driver.h @@ -154,7 +154,7 @@ void driver_remove_file(const struct device_driver *driver, int driver_set_override(struct device *dev, const char **override, const char *s, size_t len); int __must_check driver_for_each_device(struct device_driver *drv, struct device *start, - void *data, 
int (*fn)(struct device *dev, void *)); + void *data, device_iter_t fn); struct device *driver_find_device(const struct device_driver *drv, struct device *start, const void *data, device_match_t match); diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index c90ec889bfc2..99f30c7d6208 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -438,21 +438,21 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, extern const struct bus_type fsl_mc_bus_type; -extern struct device_type fsl_mc_bus_dprc_type; -extern struct device_type fsl_mc_bus_dpni_type; -extern struct device_type fsl_mc_bus_dpio_type; -extern struct device_type fsl_mc_bus_dpsw_type; -extern struct device_type fsl_mc_bus_dpbp_type; -extern struct device_type fsl_mc_bus_dpcon_type; -extern struct device_type fsl_mc_bus_dpmcp_type; -extern struct device_type fsl_mc_bus_dpmac_type; -extern struct device_type fsl_mc_bus_dprtc_type; -extern struct device_type fsl_mc_bus_dpseci_type; -extern struct device_type fsl_mc_bus_dpdmux_type; -extern struct device_type fsl_mc_bus_dpdcei_type; -extern struct device_type fsl_mc_bus_dpaiop_type; -extern struct device_type fsl_mc_bus_dpci_type; -extern struct device_type fsl_mc_bus_dpdmai_type; +extern const struct device_type fsl_mc_bus_dprc_type; +extern const struct device_type fsl_mc_bus_dpni_type; +extern const struct device_type fsl_mc_bus_dpio_type; +extern const struct device_type fsl_mc_bus_dpsw_type; +extern const struct device_type fsl_mc_bus_dpbp_type; +extern const struct device_type fsl_mc_bus_dpcon_type; +extern const struct device_type fsl_mc_bus_dpmcp_type; +extern const struct device_type fsl_mc_bus_dpmac_type; +extern const struct device_type fsl_mc_bus_dprtc_type; +extern const struct device_type fsl_mc_bus_dpseci_type; +extern const struct device_type fsl_mc_bus_dpdmux_type; +extern const struct device_type fsl_mc_bus_dpdcei_type; +extern const struct device_type fsl_mc_bus_dpaiop_type; +extern const struct device_type fsl_mc_bus_dpci_type; +extern const struct device_type fsl_mc_bus_dpdmai_type; static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev) { diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 76e891ee9e37..51ef131e66b7 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -309,11 +309,9 @@ extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs); #ifdef CONFIG_SERIAL_KGDB_NMI extern int kgdb_register_nmi_console(void); extern int kgdb_unregister_nmi_console(void); -extern bool kgdb_nmi_poll_knock(void); #else static inline int kgdb_register_nmi_console(void) { return 0; } static inline int kgdb_unregister_nmi_console(void) { return 0; } -static inline bool kgdb_nmi_poll_knock(void) { return true; } #endif extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h index be707748e7ce..150fe2ae1b6b 100644 --- a/include/linux/kobject_ns.h +++ b/include/linux/kobject_ns.h @@ -52,8 +52,6 @@ const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj); bool kobj_ns_current_may_mount(enum kobj_ns_type type); void *kobj_ns_grab_current(enum kobj_ns_type type); -const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); -const void *kobj_ns_initial(enum kobj_ns_type type); void kobj_ns_drop(enum kobj_ns_type type, void *ns); #endif /* _LINUX_KOBJECT_NS_H */ diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h index 5fc02df88252..a541c3a02887 100644 --- 
a/include/linux/nfs_common.h +++ b/include/linux/nfs_common.h @@ -9,9 +9,10 @@ #include <uapi/linux/nfs.h> /* Mapping from NFS error code to "errno" error code. */ -#define errno_NFSERR_IO EIO int nfs_stat_to_errno(enum nfs_stat status); int nfs4_stat_to_errno(int stat); +__u32 nfs_localio_errno_to_nfs4_stat(int errno); + #endif /* _LINUX_NFS_COMMON_H */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 039898d70954..67ae2c3f41d2 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -77,6 +77,23 @@ struct nfs_lock_context { struct rcu_head rcu_head; }; +struct nfs_file_localio { + struct nfsd_file __rcu *ro_file; + struct nfsd_file __rcu *rw_file; + struct list_head list; + void __rcu *nfs_uuid; /* opaque pointer to 'nfs_uuid_t' */ +}; + +static inline void nfs_localio_file_init(struct nfs_file_localio *nfl) +{ +#if IS_ENABLED(CONFIG_NFS_LOCALIO) + nfl->ro_file = NULL; + nfl->rw_file = NULL; + INIT_LIST_HEAD(&nfl->list); + nfl->nfs_uuid = NULL; +#endif +} + struct nfs4_state; struct nfs_open_context { struct nfs_lock_context lock_context; @@ -87,15 +104,16 @@ struct nfs_open_context { struct nfs4_state *state; fmode_t mode; + int error; unsigned long flags; #define NFS_CONTEXT_BAD (2) #define NFS_CONTEXT_UNLOCK (3) #define NFS_CONTEXT_FILE_OPEN (4) - int error; - struct list_head list; struct nfs4_threshold *mdsthreshold; + struct list_head list; struct rcu_head rcu_head; + struct nfs_file_localio nfl; }; struct nfs_open_dir_context { diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index b804346a9741..f00bfcee7120 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -50,7 +50,6 @@ struct nfs_client { #define NFS_CS_DS 7 /* - Server is a DS */ #define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */ #define NFS_CS_PNFS 9 /* - Server used for pnfs */ -#define NFS_CS_LOCAL_IO 10 /* - client is local */ struct sockaddr_storage cl_addr; /* server identifier */ size_t cl_addrlen; char * cl_hostname; /* hostname of server */ @@ -132,7 +131,7 @@ struct nfs_client { struct timespec64 cl_nfssvc_boot; seqlock_t cl_boot_lock; nfs_uuid_t cl_uuid; - spinlock_t cl_localio_lock; + struct work_struct cl_local_probe_work; #endif /* CONFIG_NFS_LOCALIO */ }; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index e74a87bb18a4..162b7c0c3555 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1632,6 +1632,7 @@ enum { NFS_IOHDR_RESEND_PNFS, NFS_IOHDR_RESEND_MDS, NFS_IOHDR_UNSTABLE_WRITES, + NFS_IOHDR_ODIRECT, }; struct nfs_io_completion; diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h index 9202f4b24343..9aa8a43843d7 100644 --- a/include/linux/nfslocalio.h +++ b/include/linux/nfslocalio.h @@ -6,9 +6,6 @@ #ifndef __LINUX_NFSLOCALIO_H #define __LINUX_NFSLOCALIO_H -/* nfsd_file structure is purposely kept opaque to NFS client */ -struct nfsd_file; - #if IS_ENABLED(CONFIG_NFS_LOCALIO) #include <linux/module.h> @@ -19,6 +16,9 @@ struct nfsd_file; #include <linux/nfs.h> #include <net/net_namespace.h> +struct nfs_client; +struct nfs_file_localio; + /* * Useful to allow a client to negotiate if localio * possible with its server. 
@@ -27,28 +27,38 @@ struct nfsd_file; */ typedef struct { uuid_t uuid; + unsigned nfs3_localio_probe_count; + /* this struct is over a cacheline, avoid bouncing */ + spinlock_t ____cacheline_aligned lock; struct list_head list; + spinlock_t *list_lock; /* nn->local_clients_lock */ struct net __rcu *net; /* nfsd's network namespace */ struct auth_domain *dom; /* auth_domain for localio */ + /* Local files to close when net is shut down or exports change */ + struct list_head files; } nfs_uuid_t; void nfs_uuid_init(nfs_uuid_t *); bool nfs_uuid_begin(nfs_uuid_t *); void nfs_uuid_end(nfs_uuid_t *); -void nfs_uuid_is_local(const uuid_t *, struct list_head *, +void nfs_uuid_is_local(const uuid_t *, struct list_head *, spinlock_t *, struct net *, struct auth_domain *, struct module *); -void nfs_uuid_invalidate_clients(struct list_head *list); -void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid); + +void nfs_localio_enable_client(struct nfs_client *clp); +void nfs_localio_disable_client(struct nfs_client *clp); +void nfs_localio_invalidate_clients(struct list_head *nn_local_clients, + spinlock_t *nn_local_clients_lock); /* localio needs to map filehandle -> struct nfsd_file */ extern struct nfsd_file * nfsd_open_local_fh(struct net *, struct auth_domain *, struct rpc_clnt *, const struct cred *, const struct nfs_fh *, const fmode_t) __must_hold(rcu); +void nfs_close_local_fh(struct nfs_file_localio *); struct nfsd_localio_operations { - bool (*nfsd_serv_try_get)(struct net *); - void (*nfsd_serv_put)(struct net *); + bool (*nfsd_net_try_get)(struct net *); + void (*nfsd_net_put)(struct net *); struct nfsd_file *(*nfsd_open_local_fh)(struct net *, struct auth_domain *, struct rpc_clnt *, @@ -56,6 +66,8 @@ struct nfsd_localio_operations { const struct nfs_fh *, const fmode_t); struct net *(*nfsd_file_put_local)(struct nfsd_file *); + struct nfsd_file *(*nfsd_file_get)(struct nfsd_file *); + void (*nfsd_file_put)(struct nfsd_file *); struct file *(*nfsd_file_file)(struct nfsd_file *); } ____cacheline_aligned; @@ -64,17 +76,18 @@ extern const struct nfsd_localio_operations *nfs_to; struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *, struct rpc_clnt *, const struct cred *, - const struct nfs_fh *, const fmode_t); + const struct nfs_fh *, struct nfs_file_localio *, + const fmode_t); static inline void nfs_to_nfsd_net_put(struct net *net) { /* - * Once reference to nfsd_serv is dropped, NFSD could be - * unloaded, so ensure safe return from nfsd_file_put_local() - * by always taking RCU. + * Once reference to net (and associated nfsd_serv) is dropped, NFSD + * could be unloaded, so ensure safe return from nfsd_net_put() by + * always taking RCU. 
*/ rcu_read_lock(); - nfs_to->nfsd_serv_put(net); + nfs_to->nfsd_net_put(net); rcu_read_unlock(); } @@ -91,12 +104,19 @@ static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio) } #else /* CONFIG_NFS_LOCALIO */ + +struct nfs_file_localio; +static inline void nfs_close_local_fh(struct nfs_file_localio *nfl) +{ +} static inline void nfsd_localio_ops_init(void) { } -static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio) +struct nfs_client; +static inline void nfs_localio_disable_client(struct nfs_client *clp) { } + #endif /* CONFIG_NFS_LOCALIO */ #endif /* __LINUX_NFSLOCALIO_H */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d2402bf4aea2..de5deb1a0118 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2593,6 +2593,11 @@ #define PCI_VENDOR_ID_REDHAT 0x1b36 +#define PCI_VENDOR_ID_WCHIC 0x1c00 +#define PCI_DEVICE_ID_WCHIC_CH382_0S1P 0x3050 +#define PCI_DEVICE_ID_WCHIC_CH382_2S1P 0x3250 +#define PCI_DEVICE_ID_WCHIC_CH382_2S 0x3253 + #define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 @@ -2647,6 +2652,12 @@ #define PCI_VENDOR_ID_AKS 0x416c #define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 +#define PCI_VENDOR_ID_WCHCN 0x4348 +#define PCI_DEVICE_ID_WCHCN_CH353_4S 0x3453 +#define PCI_DEVICE_ID_WCHCN_CH353_2S1PF 0x5046 +#define PCI_DEVICE_ID_WCHCN_CH353_1S1P 0x5053 +#define PCI_DEVICE_ID_WCHCN_CH353_2S1P 0x7053 + #define PCI_VENDOR_ID_ACCESSIO 0x494f #define PCI_DEVICE_ID_ACCESSIO_WDG_CSM 0x22c0 diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index e0717c8393d7..144de7a7948d 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -161,8 +161,8 @@ struct uart_8250_port { void (*dl_write)(struct uart_8250_port *up, u32 value); struct uart_8250_em485 *em485; - void (*rs485_start_tx)(struct uart_8250_port *); - void (*rs485_stop_tx)(struct uart_8250_port *); + void (*rs485_start_tx)(struct uart_8250_port *up, bool toggle_ier); + void (*rs485_stop_tx)(struct uart_8250_port *up, bool toggle_ier); /* Serial port overrun backoff */ struct delayed_work overrun_backoff; diff --git a/include/linux/stat.h b/include/linux/stat.h index 9d8382e23a9c..be7496a6a0dd 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -50,11 +50,11 @@ struct kstat { struct timespec64 btime; /* File creation time */ u64 blocks; u64 mnt_id; + u64 change_cookie; + u64 subvol; u32 dio_mem_align; u32 dio_offset_align; u32 dio_read_offset_align; - u64 change_cookie; - u64 subvol; u32 atomic_write_unit_min; u32 atomic_write_unit_max; u32 atomic_write_segments_max; diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 5321585c778f..fec976e58174 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -93,6 +93,7 @@ struct rpc_clnt { const struct cred *cl_cred; unsigned int cl_max_connect; /* max number of transports not to the same IP */ struct super_block *pipefs_sb; + atomic_t cl_task_count; }; /* diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 0f2fcd244523..18f7e1fd093c 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -293,7 +293,7 @@ __ATTRIBUTE_GROUPS(_name) #define BIN_ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group _name##_group = { \ - .bin_attrs = _name##_attrs, \ + .bin_attrs_new = _name##_attrs, \ }; \ __ATTRIBUTE_GROUPS(_name) @@ -511,7 +511,7 @@ __printf(3, 4) int sysfs_emit_at(char *buf, int at, const char *fmt, ...); ssize_t sysfs_bin_attr_simple_read(struct file *file, 
struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count); #else /* CONFIG_SYSFS */ @@ -774,7 +774,7 @@ static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...) static inline ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 5474494a1e99..76de2b662f4f 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -493,8 +493,8 @@ extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost); extern int iscsi_flashnode_bus_match(struct device *dev, const struct device_driver *drv); extern struct device * -iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, - int (*fn)(struct device *dev, void *data)); +iscsi_find_flashnode_sess(struct Scsi_Host *shost, const void *data, + device_match_t fn); extern struct device * iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess); diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index f1e99458e29e..5e0eb41d967e 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -220,6 +220,15 @@ * * 7.41 * - add FUSE_ALLOW_IDMAP + * 7.42 + * - Add FUSE_OVER_IO_URING and all other io-uring related flags and data + * structures: + * - struct fuse_uring_ent_in_out + * - struct fuse_uring_req_header + * - struct fuse_uring_cmd_req + * - FUSE_URING_IN_OUT_HEADER_SZ + * - FUSE_URING_OP_IN_OUT_SZ + * - enum fuse_uring_cmd */ #ifndef _LINUX_FUSE_H @@ -255,7 +264,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 41 +#define FUSE_KERNEL_MINOR_VERSION 42 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -425,6 +434,7 @@ struct fuse_file_lock { * FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit * of the request ID indicates resend requests * FUSE_ALLOW_IDMAP: allow creation of idmapped mounts + * FUSE_OVER_IO_URING: Indicate that client supports io-uring */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -471,6 +481,7 @@ struct fuse_file_lock { /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP #define FUSE_ALLOW_IDMAP (1ULL << 40) +#define FUSE_OVER_IO_URING (1ULL << 41) /** * CUSE INIT request/reply flags @@ -1206,4 +1217,67 @@ struct fuse_supp_groups { uint32_t groups[]; }; +/** + * Size of the ring buffer header + */ +#define FUSE_URING_IN_OUT_HEADER_SZ 128 +#define FUSE_URING_OP_IN_OUT_SZ 128 + +/* Used as part of the fuse_uring_req_header */ +struct fuse_uring_ent_in_out { + uint64_t flags; + + /* + * commit ID to be used in a reply to a ring request (see also + * struct fuse_uring_cmd_req) + */ + uint64_t commit_id; + + /* size of user payload buffer */ + uint32_t payload_sz; + uint32_t padding; + + uint64_t reserved; +}; + +/** + * Header for all fuse-io-uring requests + */ +struct fuse_uring_req_header { + /* struct fuse_in_header / struct fuse_out_header */ + char in_out[FUSE_URING_IN_OUT_HEADER_SZ]; + + /* per op code header */ + char op_in[FUSE_URING_OP_IN_OUT_SZ]; + + struct fuse_uring_ent_in_out ring_ent_in_out; +}; + +/** + * sqe commands to the kernel + */ +enum fuse_uring_cmd { + FUSE_IO_URING_CMD_INVALID = 0, + + /* register the request buffer and fetch a fuse request */ 
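+ /* + * Sketch of how a server might issue this command (hypothetical liburing-style daemon code, not part of this UAPI): submit a big (128-byte) SQE against the /dev/fuse fd with sqe->opcode = IORING_OP_URING_CMD, sqe->cmd_op = FUSE_IO_URING_CMD_REGISTER, and a struct fuse_uring_cmd_req - qid selecting the target per-CPU queue - placed in the SQE's 80-byte command area. + */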
+ FUSE_IO_URING_CMD_REGISTER = 1, + + /* commit fuse request result and fetch next request */ + FUSE_IO_URING_CMD_COMMIT_AND_FETCH = 2, +}; + +/** + * In the 80B command area of the SQE. + */ +struct fuse_uring_cmd_req { + uint64_t flags; + + /* entry identifier for commits */ + uint64_t commit_id; + + /* queue the command is for (queue index) */ + uint16_t qid; + uint8_t padding[6]; +}; + #endif /* _LINUX_FUSE_H */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 8396ce1d0fba..9de6acddd479 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -8001,17 +8001,6 @@ struct btf_module { static LIST_HEAD(btf_modules); static DEFINE_MUTEX(btf_module_mutex); -static ssize_t -btf_module_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t len) -{ - const struct btf *btf = bin_attr->private; - - memcpy(buf, btf->data + off, len); - return len; -} - static void purge_cand_cache(struct btf *btf); static int btf_module_notify(struct notifier_block *nb, unsigned long op, @@ -8072,8 +8061,8 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op, attr->attr.name = btf->name; attr->attr.mode = 0444; attr->size = btf->data_size; - attr->private = btf; - attr->read = btf_module_read; + attr->private = btf->data; + attr->read_new = sysfs_bin_attr_simple_read; err = sysfs_create_bin_file(btf_kobj, attr); if (err) { diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c index fedb54c94cdb..81d6cf90584a 100644 --- a/kernel/bpf/sysfs_btf.c +++ b/kernel/bpf/sysfs_btf.c @@ -12,24 +12,16 @@ extern char __start_BTF[]; extern char __stop_BTF[]; -static ssize_t -btf_vmlinux_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t len) -{ - memcpy(buf, __start_BTF + off, len); - return len; -} - static struct bin_attribute bin_attr_btf_vmlinux __ro_after_init = { .attr = { .name = "vmlinux", .mode = 0444, }, - .read = btf_vmlinux_read, + .read_new = sysfs_bin_attr_simple_read, }; struct kobject *btf_kobj; static int __init btf_vmlinux_init(void) { + bin_attr_btf_vmlinux.private = __start_BTF; bin_attr_btf_vmlinux.size = __stop_BTF - __start_BTF; if (bin_attr_btf_vmlinux.size == 0) diff --git a/kernel/kheaders.c b/kernel/kheaders.c index 42163c9e94e5..378088b07f46 100644 --- a/kernel/kheaders.c +++ b/kernel/kheaders.c @@ -29,25 +29,12 @@ asm ( extern char kernel_headers_data[]; extern char kernel_headers_data_end[]; -static ssize_t -ikheaders_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t len) -{ - memcpy(buf, &kernel_headers_data[off], len); - return len; -} - -static struct bin_attribute kheaders_attr __ro_after_init = { - .attr = { - .name = "kheaders.tar.xz", - .mode = 0444, - }, - .read = &ikheaders_read, -}; +static struct bin_attribute kheaders_attr __ro_after_init = + __BIN_ATTR_SIMPLE_RO(kheaders.tar.xz, 0444); static int __init ikheaders_init(void) { + kheaders_attr.private = kernel_headers_data; kheaders_attr.size = (kernel_headers_data_end - kernel_headers_data); return sysfs_create_bin_file(kernel_kobj, &kheaders_attr); diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index 1bab21b4718f..eefb67d9883c 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -239,21 +239,7 @@ extern const void __start_notes; extern const void __stop_notes; #define notes_size (&__stop_notes - &__start_notes) -static ssize_t notes_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, 
loff_t off, size_t count) -{ - memcpy(buf, &__start_notes + off, count); - return count; -} - -static struct bin_attribute notes_attr __ro_after_init = { - .attr = { - .name = "notes", - .mode = S_IRUGO, - }, - .read = ¬es_read, -}; +static __ro_after_init BIN_ATTR_SIMPLE_RO(notes); struct kobject *kernel_kobj; EXPORT_SYMBOL_GPL(kernel_kobj); @@ -307,8 +293,9 @@ static int __init ksysfs_init(void) goto kset_exit; if (notes_size > 0) { - notes_attr.size = notes_size; - error = sysfs_create_bin_file(kernel_kobj, ¬es_attr); + bin_attr_notes.private = (void *)&__start_notes; + bin_attr_notes.size = notes_size; + error = sysfs_create_bin_file(kernel_kobj, &bin_attr_notes); if (error) goto group_exit; } diff --git a/kernel/module/sysfs.c b/kernel/module/sysfs.c index f99616499e2e..b401ff4b02d2 100644 --- a/kernel/module/sysfs.c +++ b/kernel/module/sysfs.c @@ -190,7 +190,7 @@ static int add_notes_attrs(struct module *mod, const struct load_info *info) nattr->attr.mode = 0444; nattr->size = info->sechdrs[i].sh_size; nattr->private = (void *)info->sechdrs[i].sh_addr; - nattr->read = sysfs_bin_attr_simple_read; + nattr->read_new = sysfs_bin_attr_simple_read; *(gattr++) = nattr++; } ++loaded; diff --git a/kernel/smp.c b/kernel/smp.c index f104c8e83fc4..974f3a3962e8 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -170,9 +170,9 @@ static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func); static DEFINE_PER_CPU(void *, cur_csd_info); static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */ -module_param(csd_lock_timeout, ulong, 0444); +module_param(csd_lock_timeout, ulong, 0644); static int panic_on_ipistall; /* CSD panic timeout in milliseconds, 300000 for five minutes. */ -module_param(panic_on_ipistall, int, 0444); +module_param(panic_on_ipistall, int, 0644); static atomic_t csd_bug_count = ATOMIC_INIT(0); diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index da821ce258ea..8896d844d738 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -250,8 +250,8 @@ static int multi_cpu_stop(void *data) * be detected and reported on their side. */ touch_nmi_watchdog(); + rcu_momentary_eqs(); } - rcu_momentary_eqs(); } while (curstate != MULTI_STOP_EXIT); local_irq_restore(flags); diff --git a/lib/kobject.c b/lib/kobject.c index 72fa20f405f1..abe5f5b856ce 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -1096,30 +1096,6 @@ void *kobj_ns_grab_current(enum kobj_ns_type type) } EXPORT_SYMBOL_GPL(kobj_ns_grab_current); -const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk) -{ - const void *ns = NULL; - - spin_lock(&kobj_ns_type_lock); - if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type]) - ns = kobj_ns_ops_tbl[type]->netlink_ns(sk); - spin_unlock(&kobj_ns_type_lock); - - return ns; -} - -const void *kobj_ns_initial(enum kobj_ns_type type) -{ - const void *ns = NULL; - - spin_lock(&kobj_ns_type_lock); - if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type]) - ns = kobj_ns_ops_tbl[type]->initial_ns(); - spin_unlock(&kobj_ns_type_lock); - - return ns; -} - void kobj_ns_drop(enum kobj_ns_type type, void *ns) { spin_lock(&kobj_ns_type_lock); diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c index 4a85b94d12ce..794bd433cce0 100644 --- a/mm/shrinker_debug.c +++ b/mm/shrinker_debug.c @@ -195,8 +195,6 @@ int shrinker_debugfs_add(struct shrinker *shrinker) int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...) 
{ - struct dentry *entry; - char buf[128]; const char *new, *old; va_list ap; int ret = 0; @@ -213,18 +211,8 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...) old = shrinker->name; shrinker->name = new; - if (shrinker->debugfs_entry) { - snprintf(buf, sizeof(buf), "%s-%d", shrinker->name, - shrinker->debugfs_id); - - entry = debugfs_rename(shrinker_debugfs_root, - shrinker->debugfs_entry, - shrinker_debugfs_root, buf); - if (IS_ERR(entry)) - ret = PTR_ERR(entry); - else - shrinker->debugfs_entry = entry; - } + ret = debugfs_change_name(shrinker->debugfs_entry, "%s-%d", + shrinker->name, shrinker->debugfs_id); mutex_unlock(&shrinker_mutex); diff --git a/mm/slub.c b/mm/slub.c index 996691c137eb..1f50129dcfb3 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -7509,10 +7509,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep) return -ENOMEM; } - if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) - alloc = TRACK_ALLOC; - else - alloc = TRACK_FREE; + alloc = debugfs_get_aux_num(filep); if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { bitmap_free(obj_map); @@ -7568,11 +7565,11 @@ static void debugfs_slab_add(struct kmem_cache *s) slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); - debugfs_create_file("alloc_traces", 0400, - slab_cache_dir, s, &slab_debugfs_fops); + debugfs_create_file_aux_num("alloc_traces", 0400, slab_cache_dir, s, + TRACK_ALLOC, &slab_debugfs_fops); - debugfs_create_file("free_traces", 0400, - slab_cache_dir, s, &slab_debugfs_fops); + debugfs_create_file_aux_num("free_traces", 0400, slab_cache_dir, s, + TRACK_FREE, &slab_debugfs_fops); } void debugfs_slab_release(struct kmem_cache *s) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 5a7c0e565a89..e827775baf2e 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -1367,7 +1367,7 @@ static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn) return dsa_switch_parse_ports_of(ds, dn); } -static int dev_is_class(struct device *dev, void *class) +static int dev_is_class(struct device *dev, const void *class) { if (dev->class != NULL && !strcmp(dev->class->name, class)) return 1; diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c index 1a195efc79cd..5b2cfac3b2ba 100644 --- a/net/hsr/hsr_debugfs.c +++ b/net/hsr/hsr_debugfs.c @@ -57,14 +57,11 @@ DEFINE_SHOW_ATTRIBUTE(hsr_node_table); void hsr_debugfs_rename(struct net_device *dev) { struct hsr_priv *priv = netdev_priv(dev); - struct dentry *d; + int err; - d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root, - hsr_debugfs_root_dir, dev->name); - if (IS_ERR(d)) + err = debugfs_change_name(priv->node_tbl_root, "%s", dev->name); + if (err) netdev_warn(dev, "failed to rename\n"); - else - priv->node_tbl_root = d; } /* hsr_debugfs_init - create hsr node_table file for dumping diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index e7687a7b1683..54c479910d05 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -1025,16 +1025,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) { - struct dentry *dir; - char buf[10 + IFNAMSIZ]; - - dir = sdata->vif.debugfs_dir; - - if (IS_ERR_OR_NULL(dir)) - return; - - sprintf(buf, "netdev:%s", sdata->name); - debugfs_rename(dir->d_parent, dir, dir->d_parent, buf); + debugfs_change_name(sdata->vif.debugfs_dir, "netdev:%s", sdata->name); } void 
ieee80211_debugfs_recreate_netdev(struct ieee80211_sub_if_data *sdata, diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 0090162ee8c3..2fe88ea79a70 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -958,12 +958,17 @@ void rpc_shutdown_client(struct rpc_clnt *clnt) trace_rpc_clnt_shutdown(clnt); + clnt->cl_shutdown = 1; while (!list_empty(&clnt->cl_tasks)) { rpc_killall_tasks(clnt); wait_event_timeout(destroy_wait, list_empty(&clnt->cl_tasks), 1*HZ); } + /* wait for tasks still in workqueue or waitqueue */ + wait_event_timeout(destroy_wait, + atomic_read(&clnt->cl_task_count) == 0, 1 * HZ); + rpc_release_client(clnt); } EXPORT_SYMBOL_GPL(rpc_shutdown_client); @@ -1139,6 +1144,7 @@ void rpc_task_release_client(struct rpc_task *task) list_del(&task->tk_task); spin_unlock(&clnt->cl_lock); task->tk_client = NULL; + atomic_dec(&clnt->cl_task_count); rpc_release_client(clnt); } @@ -1189,10 +1195,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) task->tk_flags |= RPC_TASK_TIMEOUT; if (clnt->cl_noretranstimeo) task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; - /* Add to the client's list of all tasks */ - spin_lock(&clnt->cl_lock); - list_add_tail(&task->tk_task, &clnt->cl_tasks); - spin_unlock(&clnt->cl_lock); + atomic_inc(&clnt->cl_task_count); } static void @@ -1787,9 +1790,14 @@ call_reserveresult(struct rpc_task *task) if (status >= 0) { if (task->tk_rqstp) { task->tk_action = call_refresh; + + /* Add to the client's list of all tasks */ + spin_lock(&task->tk_client->cl_lock); + if (list_empty(&task->tk_task)) + list_add_tail(&task->tk_task, &task->tk_client->cl_tasks); + spin_unlock(&task->tk_client->cl_lock); return; } - rpc_call_rpcerror(task, -EIO); return; } @@ -1854,13 +1862,13 @@ call_refreshresult(struct rpc_task *task) fallthrough; case -EAGAIN: status = -EACCES; - fallthrough; - case -EKEYEXPIRED: if (!task->tk_cred_retry) break; task->tk_cred_retry--; trace_rpc_retry_refresh_status(task); return; + case -EKEYEXPIRED: + break; case -ENOMEM: rpc_delay(task, HZ >> 4); return; @@ -3319,8 +3327,11 @@ bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr); #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) -static void rpc_show_header(void) +static void rpc_show_header(struct rpc_clnt *clnt) { + printk(KERN_INFO "clnt[%pISpc] RPC tasks[%d]\n", + (struct sockaddr *)&clnt->cl_xprt->addr, + atomic_read(&clnt->cl_task_count)); printk(KERN_INFO "-pid- flgs status -client- --rqstp- " "-timeout ---ops--\n"); } @@ -3352,7 +3363,7 @@ void rpc_show_tasks(struct net *net) spin_lock(&clnt->cl_lock); list_for_each_entry(task, &clnt->cl_tasks, tk_task) { if (!header) { - rpc_show_header(); + rpc_show_header(clnt); header++; } rpc_show_task(clnt, task); diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index a176d5a0b0ee..32417db340de 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c @@ -74,6 +74,9 @@ tasks_stop(struct seq_file *f, void *v) { struct rpc_clnt *clnt = f->private; spin_unlock(&clnt->cl_lock); + seq_printf(f, "clnt[%pISpc] RPC tasks[%d]\n", + (struct sockaddr *)&clnt->cl_xprt->addr, + atomic_read(&clnt->cl_task_count)); } static const struct seq_operations tasks_seq_operations = { @@ -179,6 +182,18 @@ xprt_info_show(struct seq_file *f, void *v) seq_printf(f, "addr: %s\n", xprt->address_strings[RPC_DISPLAY_ADDR]); seq_printf(f, "port: %s\n", xprt->address_strings[RPC_DISPLAY_PORT]); seq_printf(f, "state: 0x%lx\n", xprt->state); + seq_printf(f, "netns: %u\n", xprt->xprt_net->ns.inum); + + if 
(xprt->ops->get_srcaddr) { + int ret, buflen; + char buf[INET6_ADDRSTRLEN]; + + buflen = ARRAY_SIZE(buf); + ret = xprt->ops->get_srcaddr(xprt, buf, buflen); + if (ret < 0) + ret = sprintf(buf, "<closed>"); + seq_printf(f, "saddr: %.*s\n", ret, buf); + } return 0; } diff --git a/net/wireless/core.c b/net/wireless/core.c index 70857018f020..12b780de8779 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -143,10 +143,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, if (result) return result; - if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir)) - debugfs_rename(rdev->wiphy.debugfsdir->d_parent, - rdev->wiphy.debugfsdir, - rdev->wiphy.debugfsdir->d_parent, newname); + debugfs_change_name(rdev->wiphy.debugfsdir, "%s", newname); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index 5c4dfe22f41a..55354e4dec14 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -20,9 +20,13 @@ #include <linux/jump_label.h> #include <linux/mdio.h> #include <linux/miscdevice.h> +#include <linux/of_device.h> +#include <linux/pci.h> #include <linux/phy.h> #include <linux/pid_namespace.h> +#include <linux/platform_device.h> #include <linux/poll.h> +#include <linux/property.h> #include <linux/refcount.h> #include <linux/sched.h> #include <linux/security.h> diff --git a/rust/helpers/device.c b/rust/helpers/device.c new file mode 100644 index 000000000000..b2135c6686b0 --- /dev/null +++ b/rust/helpers/device.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/device.h> + +int rust_helper_devm_add_action(struct device *dev, + void (*action)(void *), + void *data) +{ + return devm_add_action(dev, action, data); +} diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index dcf827a61b52..0640b7e115be 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -12,14 +12,19 @@ #include "build_assert.c" #include "build_bug.c" #include "cred.c" +#include "device.c" #include "err.c" #include "fs.c" +#include "io.c" #include "jump_label.c" #include "kunit.c" #include "mutex.c" #include "page.c" +#include "platform.c" +#include "pci.c" #include "pid_namespace.c" #include "rbtree.c" +#include "rcu.c" #include "refcount.c" #include "security.c" #include "signal.c" diff --git a/rust/helpers/io.c b/rust/helpers/io.c new file mode 100644 index 000000000000..4c2401ccd720 --- /dev/null +++ b/rust/helpers/io.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/io.h> + +void __iomem *rust_helper_ioremap(phys_addr_t offset, size_t size) +{ + return ioremap(offset, size); +} + +void rust_helper_iounmap(volatile void __iomem *addr) +{ + iounmap(addr); +} + +u8 rust_helper_readb(const volatile void __iomem *addr) +{ + return readb(addr); +} + +u16 rust_helper_readw(const volatile void __iomem *addr) +{ + return readw(addr); +} + +u32 rust_helper_readl(const volatile void __iomem *addr) +{ + return readl(addr); +} + +#ifdef CONFIG_64BIT +u64 rust_helper_readq(const volatile void __iomem *addr) +{ + return readq(addr); +} +#endif + +void rust_helper_writeb(u8 value, volatile void __iomem *addr) +{ + writeb(value, addr); +} + +void rust_helper_writew(u16 value, volatile void __iomem *addr) +{ + writew(value, addr); +} + +void rust_helper_writel(u32 value, volatile void __iomem *addr) +{ + writel(value, addr); +} + +#ifdef CONFIG_64BIT +void rust_helper_writeq(u64 value, volatile void __iomem *addr) +{ + writeq(value, addr); +} +#endif + +u8 
rust_helper_readb_relaxed(const volatile void __iomem *addr) +{ + return readb_relaxed(addr); +} + +u16 rust_helper_readw_relaxed(const volatile void __iomem *addr) +{ + return readw_relaxed(addr); +} + +u32 rust_helper_readl_relaxed(const volatile void __iomem *addr) +{ + return readl_relaxed(addr); +} + +#ifdef CONFIG_64BIT +u64 rust_helper_readq_relaxed(const volatile void __iomem *addr) +{ + return readq_relaxed(addr); +} +#endif + +void rust_helper_writeb_relaxed(u8 value, volatile void __iomem *addr) +{ + writeb_relaxed(value, addr); +} + +void rust_helper_writew_relaxed(u16 value, volatile void __iomem *addr) +{ + writew_relaxed(value, addr); +} + +void rust_helper_writel_relaxed(u32 value, volatile void __iomem *addr) +{ + writel_relaxed(value, addr); +} + +#ifdef CONFIG_64BIT +void rust_helper_writeq_relaxed(u64 value, volatile void __iomem *addr) +{ + writeq_relaxed(value, addr); +} +#endif diff --git a/rust/helpers/pci.c b/rust/helpers/pci.c new file mode 100644 index 000000000000..8ba22f911459 --- /dev/null +++ b/rust/helpers/pci.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/pci.h> + +void rust_helper_pci_set_drvdata(struct pci_dev *pdev, void *data) +{ + pci_set_drvdata(pdev, data); +} + +void *rust_helper_pci_get_drvdata(struct pci_dev *pdev) +{ + return pci_get_drvdata(pdev); +} + +resource_size_t rust_helper_pci_resource_len(struct pci_dev *pdev, int bar) +{ + return pci_resource_len(pdev, bar); +} diff --git a/rust/helpers/platform.c b/rust/helpers/platform.c new file mode 100644 index 000000000000..ab9b9f317301 --- /dev/null +++ b/rust/helpers/platform.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/platform_device.h> + +void *rust_helper_platform_get_drvdata(const struct platform_device *pdev) +{ + return platform_get_drvdata(pdev); +} + +void rust_helper_platform_set_drvdata(struct platform_device *pdev, void *data) +{ + platform_set_drvdata(pdev, data); +} diff --git a/rust/helpers/rcu.c b/rust/helpers/rcu.c new file mode 100644 index 000000000000..f1cec6583513 --- /dev/null +++ b/rust/helpers/rcu.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/rcupdate.h> + +void rust_helper_rcu_read_lock(void) +{ + rcu_read_lock(); +} + +void rust_helper_rcu_read_unlock(void) +{ + rcu_read_unlock(); +} diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs index d5e6a19ff6b7..db2d9658ba47 100644 --- a/rust/kernel/device.rs +++ b/rust/kernel/device.rs @@ -6,6 +6,7 @@ use crate::{ bindings, + str::CStr, types::{ARef, Opaque}, }; use core::{fmt, ptr}; @@ -180,6 +181,12 @@ impl Device { ) }; } + + /// Checks if property is present or not. + pub fn property_present(&self, name: &CStr) -> bool { + // SAFETY: By the invariant of `CStr`, `name` is null-terminated. + unsafe { bindings::device_property_present(self.as_raw().cast_const(), name.as_char_ptr()) } + } } // SAFETY: Instances of `Device` are always reference-counted. diff --git a/rust/kernel/device_id.rs b/rust/kernel/device_id.rs new file mode 100644 index 000000000000..e5859217a579 --- /dev/null +++ b/rust/kernel/device_id.rs @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Generic implementation of device IDs. +//! +//! Each bus / subsystem that matches device and driver through a bus / subsystem specific ID is +//! expected to implement [`RawDeviceId`]. + +use core::mem::MaybeUninit; + +/// Marker trait to indicate a Rust device ID type represents a corresponding C device ID type. 
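+/// +/// For instance, a bus typically defines a `#[repr(transparent)]` newtype around its C ID struct (e.g. `of::DeviceId` wrapping `struct of_device_id`) and implements this trait for it.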
+/// +/// This is meant to be implemented by buses/subsystems so that they can use [`IdTable`] to +/// guarantee (at compile-time) zero-termination of device id tables provided by drivers. +/// +/// # Safety +/// +/// Implementers must ensure that: +/// - `Self` is layout-compatible with [`RawDeviceId::RawType`]; i.e. it's safe to transmute to +/// `RawType`. +/// +/// This requirement is needed so `IdArray::new` can convert `Self` to `RawType` when building +/// the ID table. +/// +/// Ideally, this should be achieved using a const function that does conversion instead of +/// transmute; however, const trait functions rely on the `const_trait_impl` unstable feature, +/// which is broken/gone in Rust 1.73. +/// +/// - `DRIVER_DATA_OFFSET` is the offset of the context/data field of the device ID (usually named +/// `driver_data`), and the field is suitably sized to hold a `usize` value. +/// +/// Similar to the previous requirement, the data should ideally be added during `Self` to +/// `RawType` conversion, but there's currently no way to do it when using traits in const. +pub unsafe trait RawDeviceId { + /// The raw type that holds the device id. + /// + /// Id tables created from [`Self`] are going to hold this type in its zero-terminated array. + type RawType: Copy; + + /// The offset to the context/data field. + const DRIVER_DATA_OFFSET: usize; + + /// The index stored at `DRIVER_DATA_OFFSET` of the implementor of the [`RawDeviceId`] trait. + fn index(&self) -> usize; +} + +/// A zero-terminated device id array. +#[repr(C)] +pub struct RawIdArray<T: RawDeviceId, const N: usize> { + ids: [T::RawType; N], + sentinel: MaybeUninit<T::RawType>, +} + +impl<T: RawDeviceId, const N: usize> RawIdArray<T, N> { + #[doc(hidden)] + pub const fn size(&self) -> usize { + core::mem::size_of::<Self>() + } +} + +/// A zero-terminated device id array, followed by context data. +#[repr(C)] +pub struct IdArray<T: RawDeviceId, U, const N: usize> { + raw_ids: RawIdArray<T, N>, + id_infos: [U; N], +} + +impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> { + /// Creates a new instance of the array. + /// + /// The contents are derived from the given identifiers and context information. + pub const fn new(ids: [(T, U); N]) -> Self { + let mut raw_ids = [const { MaybeUninit::<T::RawType>::uninit() }; N]; + let mut infos = [const { MaybeUninit::uninit() }; N]; + + let mut i = 0usize; + while i < N { + // SAFETY: by the safety requirement of `RawDeviceId`, we're guaranteed that `T` is + // layout-wise compatible with `RawType`. + raw_ids[i] = unsafe { core::mem::transmute_copy(&ids[i].0) }; + // SAFETY: by the safety requirement of `RawDeviceId`, this would be effectively + // `raw_ids[i].driver_data = i;`. + unsafe { + raw_ids[i] + .as_mut_ptr() + .byte_offset(T::DRIVER_DATA_OFFSET as _) + .cast::<usize>() + .write(i); + } + + // SAFETY: this is effectively a move: `infos[i] = ids[i].1`. We make a copy here but + // later forget `ids`. + infos[i] = MaybeUninit::new(unsafe { core::ptr::read(&ids[i].1) }); + i += 1; + } + + core::mem::forget(ids); + + Self { + raw_ids: RawIdArray { + // SAFETY: this is effectively `array_assume_init`, which is unstable, so we use + // `transmute_copy` instead. We have initialized all elements of `raw_ids` so this + // `array_assume_init` is safe. + ids: unsafe { core::mem::transmute_copy(&raw_ids) }, + sentinel: MaybeUninit::zeroed(), + }, + // SAFETY: We have initialized all elements of `infos` so this `array_assume_init` is + // safe. 
+ id_infos: unsafe { core::mem::transmute_copy(&infos) }, + } + } + + /// Reference to the contained [`RawIdArray`]. + pub const fn raw_ids(&self) -> &RawIdArray<T, N> { + &self.raw_ids + } +} + +/// A device id table. +/// +/// This trait is only implemented by `IdArray`. +/// +/// The purpose of this trait is to allow `&'static dyn IdTable<T, U>` to be in context when `N` in +/// `IdArray` doesn't matter. +pub trait IdTable<T: RawDeviceId, U> { + /// Obtain the pointer to the ID table. + fn as_ptr(&self) -> *const T::RawType; + + /// Obtain the pointer to the bus specific device ID from an index. + fn id(&self, index: usize) -> &T::RawType; + + /// Obtain the pointer to the driver-specific information from an index. + fn info(&self, index: usize) -> &U; +} + +impl<T: RawDeviceId, U, const N: usize> IdTable<T, U> for IdArray<T, U, N> { + fn as_ptr(&self) -> *const T::RawType { + // This cannot be `self.ids.as_ptr()`, as the return pointer must have correct provenance + // to access the sentinel. + (self as *const Self).cast() + } + + fn id(&self, index: usize) -> &T::RawType { + &self.raw_ids.ids[index] + } + + fn info(&self, index: usize) -> &U { + &self.id_infos[index] + } +} + +/// Create device table alias for modpost. +#[macro_export] +macro_rules! module_device_table { + ($table_type: literal, $module_table_name:ident, $table_name:ident) => { + #[rustfmt::skip] + #[export_name = + concat!("__mod_device_table__", $table_type, + "__", module_path!(), + "_", line!(), + "_", stringify!($table_name)) + ] + static $module_table_name: [core::mem::MaybeUninit<u8>; $table_name.raw_ids().size()] = + unsafe { core::mem::transmute_copy($table_name.raw_ids()) }; + }; +} diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs new file mode 100644 index 000000000000..942376f6f3af --- /dev/null +++ b/rust/kernel/devres.rs @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Devres abstraction +//! +//! [`Devres`] represents an abstraction for the kernel devres (device resource management) +//! implementation. + +use crate::{ + alloc::Flags, + bindings, + device::Device, + error::{Error, Result}, + ffi::c_void, + prelude::*, + revocable::Revocable, + sync::Arc, + types::ARef, +}; + +use core::ops::Deref; + +#[pin_data] +struct DevresInner<T> { + dev: ARef<Device>, + callback: unsafe extern "C" fn(*mut c_void), + #[pin] + data: Revocable<T>, +} + +/// This abstraction is meant to be used by subsystems to containerize [`Device`] bound resources to +/// manage their lifetime. +/// +/// [`Device`] bound resources should be freed when either the resource goes out of scope or the +/// [`Device`] is unbound, depending on what happens first. +/// +/// To achieve that [`Devres`] registers a devres callback on creation, which is called once the +/// [`Device`] is unbound, revoking access to the encapsulated resource (see also [`Revocable`]). +/// +/// After the [`Device`] has been unbound it is not possible to access the encapsulated resource +/// anymore. +/// +/// [`Devres`] users should make sure to simply free the corresponding backing resource in `T`'s +/// [`Drop`] implementation. +/// +/// # Example +/// +/// ```no_run +/// # use kernel::{bindings, c_str, device::Device, devres::Devres, io::{Io, IoRaw}}; +/// # use core::ops::Deref; +/// +/// // See also [`pci::Bar`] for a real example. 
+/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>); +/// +/// impl<const SIZE: usize> IoMem<SIZE> { +/// /// # Safety +/// /// +/// /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's +/// /// virtual address space. +/// unsafe fn new(paddr: usize) -> Result<Self> { +/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is +/// // valid for `ioremap`. +/// let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) }; +/// if addr.is_null() { +/// return Err(ENOMEM); +/// } +/// +/// Ok(IoMem(IoRaw::new(addr as _, SIZE)?)) +/// } +/// } +/// +/// impl<const SIZE: usize> Drop for IoMem<SIZE> { +/// fn drop(&mut self) { +/// // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`. +/// unsafe { bindings::iounmap(self.0.addr() as _); }; +/// } +/// } +/// +/// impl<const SIZE: usize> Deref for IoMem<SIZE> { +/// type Target = Io<SIZE>; +/// +/// fn deref(&self) -> &Self::Target { +/// // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`. +/// unsafe { Io::from_raw(&self.0) } +/// } +/// } +/// # fn no_run() -> Result<(), Error> { +/// # // SAFETY: Invalid usage; just for the example to get an `ARef<Device>` instance. +/// # let dev = unsafe { Device::get_device(core::ptr::null_mut()) }; +/// +/// // SAFETY: Invalid usage for example purposes. +/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? }; +/// let devres = Devres::new(&dev, iomem, GFP_KERNEL)?; +/// +/// let res = devres.try_access().ok_or(ENXIO)?; +/// res.writel(0x42, 0x0); +/// # Ok(()) +/// # } +/// ``` +pub struct Devres<T>(Arc<DevresInner<T>>); + +impl<T> DevresInner<T> { + fn new(dev: &Device, data: T, flags: Flags) -> Result<Arc<DevresInner<T>>> { + let inner = Arc::pin_init( + pin_init!( DevresInner { + dev: dev.into(), + callback: Self::devres_callback, + data <- Revocable::new(data), + }), + flags, + )?; + + // Convert `Arc<DevresInner>` into a raw pointer and make devres own this reference until + // `Self::devres_callback` is called. + let data = inner.clone().into_raw(); + + // SAFETY: `devm_add_action` guarantees to call `Self::devres_callback` once `dev` is + // detached. + let ret = + unsafe { bindings::devm_add_action(dev.as_raw(), Some(inner.callback), data as _) }; + + if ret != 0 { + // SAFETY: We just created another reference to `inner` in order to pass it to + // `bindings::devm_add_action`. If `bindings::devm_add_action` fails, we have to drop + // this reference accordingly. + let _ = unsafe { Arc::from_raw(data) }; + return Err(Error::from_errno(ret)); + } + + Ok(inner) + } + + fn as_ptr(&self) -> *const Self { + self as _ + } + + fn remove_action(this: &Arc<Self>) { + // SAFETY: + // - `self.inner.dev` is a valid `Device`, + // - the `action` and `data` pointers are the exact same ones as given to devm_add_action() + // previously, + // - `self` is always valid, even if the action has been released already. + let ret = unsafe { + bindings::devm_remove_action_nowarn( + this.dev.as_raw(), + Some(this.callback), + this.as_ptr() as _, + ) + }; + + if ret == 0 { + // SAFETY: We leaked an `Arc` reference to devm_add_action() in `DevresInner::new`; if + // devm_remove_action_nowarn() was successful we can (and have to) claim back ownership + // of this reference. 
+ let _ = unsafe { Arc::from_raw(this.as_ptr()) }; + } + } + + #[allow(clippy::missing_safety_doc)] + unsafe extern "C" fn devres_callback(ptr: *mut kernel::ffi::c_void) { + let ptr = ptr as *mut DevresInner<T>; + // Devres owned this memory; now that we received the callback, drop the `Arc` and hence the + // reference. + // SAFETY: Safe, since we leaked an `Arc` reference to devm_add_action() in + // `DevresInner::new`. + let inner = unsafe { Arc::from_raw(ptr) }; + + inner.data.revoke(); + } +} + +impl<T> Devres<T> { + /// Creates a new [`Devres`] instance of the given `data`. The `data` encapsulated within the + /// returned [`Devres`] instance will be revoked once the device is detached. + pub fn new(dev: &Device, data: T, flags: Flags) -> Result<Self> { + let inner = DevresInner::new(dev, data, flags)?; + + Ok(Devres(inner)) + } + + /// Same as [`Devres::new`], but does not return a `Devres` instance. Instead the given `data` + /// is owned by devres and will be revoked / dropped once the device is detached. + pub fn new_foreign_owned(dev: &Device, data: T, flags: Flags) -> Result { + let _ = DevresInner::new(dev, data, flags)?; + + Ok(()) + } +} + +impl<T> Deref for Devres<T> { + type Target = Revocable<T>; + + fn deref(&self) -> &Self::Target { + &self.0.data + } +} + +impl<T> Drop for Devres<T> { + fn drop(&mut self) { + DevresInner::remove_action(&self.0); + } +} diff --git a/rust/kernel/driver.rs b/rust/kernel/driver.rs new file mode 100644 index 000000000000..2a16d5e64e6c --- /dev/null +++ b/rust/kernel/driver.rs @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Generic support for drivers of different buses (e.g., PCI, Platform, Amba, etc.). +//! +//! Each bus / subsystem is expected to implement [`RegistrationOps`], which allows drivers to +//! register using the [`Registration`] class. + +use crate::error::{Error, Result}; +use crate::{device, init::PinInit, of, str::CStr, try_pin_init, types::Opaque, ThisModule}; +use core::pin::Pin; +use macros::{pin_data, pinned_drop}; + +/// The [`RegistrationOps`] trait serves as a generic interface for subsystems (e.g., PCI, Platform, +/// Amba, etc.) to provide the corresponding subsystem specific implementation to register / +/// unregister a driver of the particular type (`RegType`). +/// +/// For instance, the PCI subsystem would set `RegType` to `bindings::pci_driver` and call +/// `bindings::__pci_register_driver` from `RegistrationOps::register` and +/// `bindings::pci_unregister_driver` from `RegistrationOps::unregister`. +/// +/// # Safety +/// +/// A call to [`RegistrationOps::unregister`] for a given instance of `RegType` is only valid if a +/// preceding call to [`RegistrationOps::register`] has been successful. +pub unsafe trait RegistrationOps { + /// The type that holds information about the registration. This is typically a struct defined + /// by the C portion of the kernel. + type RegType: Default; + + /// Registers a driver. + /// + /// # Safety + /// + /// On success, `reg` must remain pinned and valid until the matching call to + /// [`RegistrationOps::unregister`]. + unsafe fn register( + reg: &Opaque<Self::RegType>, + name: &'static CStr, + module: &'static ThisModule, + ) -> Result; + + /// Unregisters a driver previously registered with [`RegistrationOps::register`]. + /// + /// # Safety + /// + /// Must only be called after a preceding successful call to [`RegistrationOps::register`] for + /// the same `reg`. 
+    unsafe fn unregister(reg: &Opaque<Self::RegType>);
+}
+
+/// A [`Registration`] is a generic type that represents the registration of some driver type (e.g.
+/// `bindings::pci_driver`). Therefore a [`Registration`] must be initialized with a type that
+/// implements the [`RegistrationOps`] trait, such that the generic `T::register` and
+/// `T::unregister` calls result in the subsystem specific registration calls.
+///
+/// Once the `Registration` structure is dropped, the driver is unregistered.
+#[pin_data(PinnedDrop)]
+pub struct Registration<T: RegistrationOps> {
+    #[pin]
+    reg: Opaque<T::RegType>,
+}
+
+// SAFETY: `Registration` has no fields or methods accessible via `&Registration`, so it is safe to
+// share references to it with multiple threads as nothing can be done.
+unsafe impl<T: RegistrationOps> Sync for Registration<T> {}
+
+// SAFETY: Both registration and unregistration are implemented in C and safe to be performed from
+// any thread, so `Registration` is `Send`.
+unsafe impl<T: RegistrationOps> Send for Registration<T> {}
+
+impl<T: RegistrationOps> Registration<T> {
+    /// Creates a new instance of the registration object.
+    pub fn new(name: &'static CStr, module: &'static ThisModule) -> impl PinInit<Self, Error> {
+        try_pin_init!(Self {
+            reg <- Opaque::try_ffi_init(|ptr: *mut T::RegType| {
+                // SAFETY: `try_ffi_init` guarantees that `ptr` is valid for write.
+                unsafe { ptr.write(T::RegType::default()) };
+
+                // SAFETY: `try_ffi_init` guarantees that `ptr` is valid for write, and it has
+                // just been initialised above, so it's also valid for read.
+                let drv = unsafe { &*(ptr as *const Opaque<T::RegType>) };
+
+                // SAFETY: `drv` is guaranteed to be pinned until `T::unregister`.
+                unsafe { T::register(drv, name, module) }
+            }),
+        })
+    }
+}
+
+#[pinned_drop]
+impl<T: RegistrationOps> PinnedDrop for Registration<T> {
+    fn drop(self: Pin<&mut Self>) {
+        // SAFETY: The existence of `self` guarantees that `self.reg` has previously been
+        // successfully registered with `T::register`.
+        unsafe { T::unregister(&self.reg) };
+    }
+}
+
+/// Declares a kernel module that exposes a single driver.
+///
+/// It is meant to be used as a helper by other subsystems so they can more easily expose their own
+/// macros.
+#[macro_export]
macro_rules! module_driver {
+    (<$gen_type:ident>, $driver_ops:ty, { type: $type:ty, $($f:tt)* }) => {
+        type Ops<$gen_type> = $driver_ops;
+
+        #[$crate::prelude::pin_data]
+        struct DriverModule {
+            #[pin]
+            _driver: $crate::driver::Registration<Ops<$type>>,
+        }
+
+        impl $crate::InPlaceModule for DriverModule {
+            fn init(
+                module: &'static $crate::ThisModule
+            ) -> impl $crate::init::PinInit<Self, $crate::error::Error> {
+                $crate::try_pin_init!(Self {
+                    _driver <- $crate::driver::Registration::new(
+                        <Self as $crate::ModuleMetadata>::NAME,
+                        module,
+                    ),
+                })
+            }
+        }
+
+        $crate::prelude::module! {
+            type: DriverModule,
+            $($f)*
+        }
+    }
+}
+
+/// The bus independent adapter to match drivers and devices.
+///
+/// This trait should be implemented by the bus specific adapter, which represents the connection
+/// of a device and a driver.
+///
+/// It provides bus independent functions for device / driver interactions.
+pub trait Adapter {
+    /// The type holding driver private data about each device id supported by the driver.
+    type IdInfo: 'static;
+
+    /// The [`of::IdTable`] of the corresponding driver.
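+    ///
+    /// Buses without device tree support can return `None` here; a bus with OF support would
+    /// simply forward its driver's table, as in this sketch (where `T` is the bus's driver type):
+    ///
+    /// ```ignore
+    /// fn of_id_table() -> Option<of::IdTable<Self::IdInfo>> {
+    ///     T::OF_ID_TABLE
+    /// }
+    /// ```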
+    fn of_id_table() -> Option<of::IdTable<Self::IdInfo>>;
+
+    /// Returns the driver's private data from the matching entry in the [`of::IdTable`], if any.
+    ///
+    /// If this returns `None`, it means there is no match with an entry in the [`of::IdTable`].
+    #[cfg(CONFIG_OF)]
+    fn of_id_info(dev: &device::Device) -> Option<&'static Self::IdInfo> {
+        let table = Self::of_id_table()?;
+
+        // SAFETY:
+        // - `table` has static lifetime, hence it's valid for read,
+        // - `dev` is guaranteed to be valid while it's alive, and so is `dev.as_raw()`.
+        let raw_id = unsafe { bindings::of_match_device(table.as_ptr(), dev.as_raw()) };
+
+        if raw_id.is_null() {
+            None
+        } else {
+            // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct of_device_id` and
+            // does not add additional invariants, so it's safe to transmute.
+            let id = unsafe { &*raw_id.cast::<of::DeviceId>() };
+
+            Some(table.info(<of::DeviceId as crate::device_id::RawDeviceId>::index(id)))
+        }
+    }
+
+    #[cfg(not(CONFIG_OF))]
+    #[allow(missing_docs)]
+    fn of_id_info(_dev: &device::Device) -> Option<&'static Self::IdInfo> {
+        None
+    }
+
+    /// Returns the driver's private data from the matching entry of any of the ID tables, if any.
+    ///
+    /// If this returns `None`, it means that there is no match in any of the ID tables directly
+    /// associated with a [`device::Device`].
+    fn id_info(dev: &device::Device) -> Option<&'static Self::IdInfo> {
+        let id = Self::of_id_info(dev);
+        if id.is_some() {
+            return id;
+        }
+
+        None
+    }
+}
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
new file mode 100644
index 000000000000..d4a73e52e3ee
--- /dev/null
+++ b/rust/kernel/io.rs
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory-mapped IO.
+//!
+//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)
+
+use crate::error::{code::EINVAL, Result};
+use crate::{bindings, build_assert};
+
+/// Raw representation of an MMIO region.
+///
+/// By itself, the existence of an instance of this structure does not provide any guarantees that
+/// the represented MMIO region exists or is properly mapped.
+///
+/// Instead, the bus specific MMIO implementation must convert this raw representation into an `Io`
+/// instance providing the actual memory accessors. Only the conversion into an `Io` structure
+/// provides any guarantees.
+pub struct IoRaw<const SIZE: usize = 0> {
+    addr: usize,
+    maxsize: usize,
+}
+
+impl<const SIZE: usize> IoRaw<SIZE> {
+    /// Returns a new `IoRaw` instance on success, an error otherwise.
+    pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
+        if maxsize < SIZE {
+            return Err(EINVAL);
+        }
+
+        Ok(Self { addr, maxsize })
+    }
+
+    /// Returns the base address of the MMIO region.
+    #[inline]
+    pub fn addr(&self) -> usize {
+        self.addr
+    }
+
+    /// Returns the maximum size of the MMIO region.
+    #[inline]
+    pub fn maxsize(&self) -> usize {
+        self.maxsize
+    }
+}
+
+/// IO-mapped memory, starting at the base address `addr` and spanning `maxsize` bytes.
+///
+/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
+/// mapping, performing an additional region request, etc.
+///
+/// # Invariant
+///
+/// `addr` is the start and `maxsize` the length of a valid I/O mapped memory region.
+///
+/// # Examples
+///
+/// ```no_run
+/// # use kernel::{bindings, io::{Io, IoRaw}};
+/// # use core::ops::Deref;
+///
+/// // See also [`pci::Bar`] for a real example.
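+/// // The `IoMem` type below is an illustrative sketch of such a bus-provided wrapper:
+/// // it owns the `ioremap` mapping and hands out `&Io<SIZE>` via `Deref`.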
+/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
+///
+/// impl<const SIZE: usize> IoMem<SIZE> {
+///     /// # Safety
+///     ///
+///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the
+///     /// CPU's virtual address space.
+///     unsafe fn new(paddr: usize) -> Result<Self> {
+///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
+///         // valid for `ioremap`.
+///         let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+///         if addr.is_null() {
+///             return Err(ENOMEM);
+///         }
+///
+///         Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+///     }
+/// }
+///
+/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
+///     fn drop(&mut self) {
+///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
+///         unsafe { bindings::iounmap(self.0.addr() as _); };
+///     }
+/// }
+///
+/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
+///     type Target = Io<SIZE>;
+///
+///     fn deref(&self) -> &Self::Target {
+///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
+///         unsafe { Io::from_raw(&self.0) }
+///     }
+/// }
+///
+/// # fn no_run() -> Result<(), Error> {
+/// // SAFETY: Invalid usage for example purposes.
+/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
+/// iomem.writel(0x42, 0x0);
+/// assert!(iomem.try_writel(0x42, 0x0).is_ok());
+/// assert!(iomem.try_writel(0x42, 0x4).is_err());
+/// # Ok(())
+/// # }
+/// ```
+#[repr(transparent)]
+pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);
+
+macro_rules! define_read {
+    ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
+        /// Read IO data from a given offset known at compile time.
+        ///
+        /// Bounds checks are performed at compile time; hence, if the offset is not known at
+        /// compile time, the build will fail.
+        $(#[$attr])*
+        #[inline]
+        pub fn $name(&self, offset: usize) -> $type_name {
+            let addr = self.io_addr_assert::<$type_name>(offset);
+
+            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+            unsafe { bindings::$name(addr as _) }
+        }
+
+        /// Read IO data from a given offset.
+        ///
+        /// Bounds checks are performed at runtime; the call fails if the offset (plus the type
+        /// size) is out of bounds.
+        $(#[$attr])*
+        pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
+            let addr = self.io_addr::<$type_name>(offset)?;
+
+            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+            Ok(unsafe { bindings::$name(addr as _) })
+        }
+    };
+}
+
+macro_rules! define_write {
+    ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
+        /// Write IO data to a given offset known at compile time.
+        ///
+        /// Bounds checks are performed at compile time; hence, if the offset is not known at
+        /// compile time, the build will fail.
+        $(#[$attr])*
+        #[inline]
+        pub fn $name(&self, value: $type_name, offset: usize) {
+            let addr = self.io_addr_assert::<$type_name>(offset);
+
+            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+            unsafe { bindings::$name(value, addr as _) }
+        }
+
+        /// Write IO data to a given offset.
+        ///
+        /// Bounds checks are performed at runtime; the call fails if the offset (plus the type
+        /// size) is out of bounds.
+        $(#[$attr])*
+        pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
+            let addr = self.io_addr::<$type_name>(offset)?;
+
+            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ unsafe { bindings::$name(value, addr as _) } + Ok(()) + } + }; +} + +impl<const SIZE: usize> Io<SIZE> { + /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping. + /// + /// # Safety + /// + /// Callers must ensure that `addr` is the start of a valid I/O mapped memory region of size + /// `maxsize`. + pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self { + // SAFETY: `Io` is a transparent wrapper around `IoRaw`. + unsafe { &*core::ptr::from_ref(raw).cast() } + } + + /// Returns the base address of this mapping. + #[inline] + pub fn addr(&self) -> usize { + self.0.addr() + } + + /// Returns the maximum size of this mapping. + #[inline] + pub fn maxsize(&self) -> usize { + self.0.maxsize() + } + + #[inline] + const fn offset_valid<U>(offset: usize, size: usize) -> bool { + let type_size = core::mem::size_of::<U>(); + if let Some(end) = offset.checked_add(type_size) { + end <= size && offset % type_size == 0 + } else { + false + } + } + + #[inline] + fn io_addr<U>(&self, offset: usize) -> Result<usize> { + if !Self::offset_valid::<U>(offset, self.maxsize()) { + return Err(EINVAL); + } + + // Probably no need to check, since the safety requirements of `Self::new` guarantee that + // this can't overflow. + self.addr().checked_add(offset).ok_or(EINVAL) + } + + #[inline] + fn io_addr_assert<U>(&self, offset: usize) -> usize { + build_assert!(Self::offset_valid::<U>(offset, SIZE)); + + self.addr() + offset + } + + define_read!(readb, try_readb, u8); + define_read!(readw, try_readw, u16); + define_read!(readl, try_readl, u32); + define_read!( + #[cfg(CONFIG_64BIT)] + readq, + try_readq, + u64 + ); + + define_read!(readb_relaxed, try_readb_relaxed, u8); + define_read!(readw_relaxed, try_readw_relaxed, u16); + define_read!(readl_relaxed, try_readl_relaxed, u32); + define_read!( + #[cfg(CONFIG_64BIT)] + readq_relaxed, + try_readq_relaxed, + u64 + ); + + define_write!(writeb, try_writeb, u8); + define_write!(writew, try_writew, u16); + define_write!(writel, try_writel, u32); + define_write!( + #[cfg(CONFIG_64BIT)] + writeq, + try_writeq, + u64 + ); + + define_write!(writeb_relaxed, try_writeb_relaxed, u8); + define_write!(writew_relaxed, try_writew_relaxed, u16); + define_write!(writel_relaxed, try_writel_relaxed, u32); + define_write!( + #[cfg(CONFIG_64BIT)] + writeq_relaxed, + try_writeq_relaxed, + u64 + ); +} diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index 545d1170ee63..496ed32b0911 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -19,6 +19,11 @@ #![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(unsize))] #![feature(inline_const)] #![feature(lint_reasons)] +// Stable in Rust 1.83 +#![feature(const_maybe_uninit_as_mut_ptr)] +#![feature(const_mut_refs)] +#![feature(const_ptr_write)] +#![feature(const_refs_to_cell)] // Ensure conditional compilation based on the kernel configuration works; // otherwise we may silently break things like initcall handling. 
@@ -37,11 +42,15 @@ pub mod block; pub mod build_assert; pub mod cred; pub mod device; +pub mod device_id; +pub mod devres; +pub mod driver; pub mod error; #[cfg(CONFIG_RUST_FW_LOADER_ABSTRACTIONS)] pub mod firmware; pub mod fs; pub mod init; +pub mod io; pub mod ioctl; pub mod jump_label; #[cfg(CONFIG_KUNIT)] @@ -50,11 +59,16 @@ pub mod list; pub mod miscdevice; #[cfg(CONFIG_NET)] pub mod net; +pub mod of; pub mod page; +#[cfg(CONFIG_PCI)] +pub mod pci; pub mod pid_namespace; +pub mod platform; pub mod prelude; pub mod print; pub mod rbtree; +pub mod revocable; pub mod security; pub mod seq_file; pub mod sizes; @@ -115,6 +129,12 @@ impl<T: Module> InPlaceModule for T { } } +/// Metadata attached to a [`Module`] or [`InPlaceModule`]. +pub trait ModuleMetadata { + /// The name of the module as specified in the `module!` macro. + const NAME: &'static crate::str::CStr; +} + /// Equivalent to `THIS_MODULE` in the C API. /// /// C header: [`include/linux/init.h`](srctree/include/linux/init.h) diff --git a/rust/kernel/miscdevice.rs b/rust/kernel/miscdevice.rs index b3a6cc50b240..e14433b2ab9d 100644 --- a/rust/kernel/miscdevice.rs +++ b/rust/kernel/miscdevice.rs @@ -10,9 +10,12 @@ use crate::{ bindings, + device::Device, error::{to_result, Error, Result, VTABLE_DEFAULT_ERROR}, ffi::{c_int, c_long, c_uint, c_ulong}, + fs::File, prelude::*, + seq_file::SeqFile, str::CStr, types::{ForeignOwnable, Opaque}, }; @@ -80,6 +83,16 @@ impl<T: MiscDevice> MiscDeviceRegistration<T> { pub fn as_raw(&self) -> *mut bindings::miscdevice { self.inner.get() } + + /// Access the `this_device` field. + pub fn device(&self) -> &Device { + // SAFETY: This can only be called after a successful register(), which always + // initialises `this_device` with a valid device. Furthermore, the signature of this + // function tells the borrow-checker that the `&Device` reference must not outlive the + // `&MiscDeviceRegistration<T>` used to obtain it, so the last use of the reference must be + // before the underlying `struct miscdevice` is destroyed. + unsafe { Device::as_ref((*self.as_raw()).this_device) } + } } #[pinned_drop] @@ -92,17 +105,17 @@ impl<T> PinnedDrop for MiscDeviceRegistration<T> { /// Trait implemented by the private data of an open misc device. #[vtable] -pub trait MiscDevice { +pub trait MiscDevice: Sized { /// What kind of pointer should `Self` be wrapped in. type Ptr: ForeignOwnable + Send + Sync; /// Called when the misc device is opened. /// /// The returned pointer will be stored as the private data for the file. - fn open() -> Result<Self::Ptr>; + fn open(_file: &File, _misc: &MiscDeviceRegistration<Self>) -> Result<Self::Ptr>; /// Called when the misc device is released. - fn release(device: Self::Ptr) { + fn release(device: Self::Ptr, _file: &File) { drop(device); } @@ -113,6 +126,7 @@ pub trait MiscDevice { /// [`kernel::ioctl`]: mod@crate::ioctl fn ioctl( _device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>, + _file: &File, _cmd: u32, _arg: usize, ) -> Result<isize> { @@ -129,11 +143,21 @@ pub trait MiscDevice { #[cfg(CONFIG_COMPAT)] fn compat_ioctl( _device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>, + _file: &File, _cmd: u32, _arg: usize, ) -> Result<isize> { build_error!(VTABLE_DEFAULT_ERROR) } + + /// Show info for this fd. 
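+    ///
+    /// A typical implementation prints device state to the given [`SeqFile`]; a minimal
+    /// sketch (the `value` field is hypothetical):
+    ///
+    /// ```ignore
+    /// fn show_fdinfo(
+    ///     device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+    ///     m: &SeqFile,
+    ///     _file: &File,
+    /// ) {
+    ///     kernel::seq_print!(m, "value: {}\n", device.value);
+    /// }
+    /// ```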
+ fn show_fdinfo( + _device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>, + _m: &SeqFile, + _file: &File, + ) { + build_error!(VTABLE_DEFAULT_ERROR) + } } const fn create_vtable<T: MiscDevice>() -> &'static bindings::file_operations { @@ -161,6 +185,7 @@ const fn create_vtable<T: MiscDevice>() -> &'static bindings::file_operations { } else { None }, + show_fdinfo: maybe_fn(T::HAS_SHOW_FDINFO, fops_show_fdinfo::<T>), // SAFETY: All zeros is a valid value for `bindings::file_operations`. ..unsafe { MaybeUninit::zeroed().assume_init() } }; @@ -175,21 +200,38 @@ const fn create_vtable<T: MiscDevice>() -> &'static bindings::file_operations { /// The file must be associated with a `MiscDeviceRegistration<T>`. unsafe extern "C" fn fops_open<T: MiscDevice>( inode: *mut bindings::inode, - file: *mut bindings::file, + raw_file: *mut bindings::file, ) -> c_int { // SAFETY: The pointers are valid and for a file being opened. - let ret = unsafe { bindings::generic_file_open(inode, file) }; + let ret = unsafe { bindings::generic_file_open(inode, raw_file) }; if ret != 0 { return ret; } - let ptr = match T::open() { + // SAFETY: The open call of a file can access the private data. + let misc_ptr = unsafe { (*raw_file).private_data }; + + // SAFETY: This is a miscdevice, so `misc_open()` set the private data to a pointer to the + // associated `struct miscdevice` before calling into this method. Furthermore, `misc_open()` + // ensures that the miscdevice can't be unregistered and freed during this call to `fops_open`. + let misc = unsafe { &*misc_ptr.cast::<MiscDeviceRegistration<T>>() }; + + // SAFETY: + // * This underlying file is valid for (much longer than) the duration of `T::open`. + // * There is no active fdget_pos region on the file on this thread. + let file = unsafe { File::from_raw_file(raw_file) }; + + let ptr = match T::open(file, misc) { Ok(ptr) => ptr, Err(err) => return err.to_errno(), }; - // SAFETY: The open call of a file owns the private data. - unsafe { (*file).private_data = ptr.into_foreign() }; + // This overwrites the private data with the value specified by the user, changing the type of + // this file's private data. All future accesses to the private data is performed by other + // fops_* methods in this file, which all correctly cast the private data to the new type. + // + // SAFETY: The open call of a file can access the private data. + unsafe { (*raw_file).private_data = ptr.into_foreign() }; 0 } @@ -207,7 +249,10 @@ unsafe extern "C" fn fops_release<T: MiscDevice>( // SAFETY: The release call of a file owns the private data. let ptr = unsafe { <T::Ptr as ForeignOwnable>::from_foreign(private) }; - T::release(ptr); + // SAFETY: + // * The file is valid for the duration of this call. + // * There is no active fdget_pos region on the file on this thread. + T::release(ptr, unsafe { File::from_raw_file(file) }); 0 } @@ -225,7 +270,12 @@ unsafe extern "C" fn fops_ioctl<T: MiscDevice>( // SAFETY: Ioctl calls can borrow the private data of the file. let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) }; - match T::ioctl(device, cmd, arg) { + // SAFETY: + // * The file is valid for the duration of this call. + // * There is no active fdget_pos region on the file on this thread. 
+ let file = unsafe { File::from_raw_file(file) }; + + match T::ioctl(device, file, cmd, arg) { Ok(ret) => ret as c_long, Err(err) => err.to_errno() as c_long, } @@ -245,8 +295,36 @@ unsafe extern "C" fn fops_compat_ioctl<T: MiscDevice>( // SAFETY: Ioctl calls can borrow the private data of the file. let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) }; - match T::compat_ioctl(device, cmd, arg) { + // SAFETY: + // * The file is valid for the duration of this call. + // * There is no active fdget_pos region on the file on this thread. + let file = unsafe { File::from_raw_file(file) }; + + match T::compat_ioctl(device, file, cmd, arg) { Ok(ret) => ret as c_long, Err(err) => err.to_errno() as c_long, } } + +/// # Safety +/// +/// - `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`. +/// - `seq_file` must be a valid `struct seq_file` that we can write to. +unsafe extern "C" fn fops_show_fdinfo<T: MiscDevice>( + seq_file: *mut bindings::seq_file, + file: *mut bindings::file, +) { + // SAFETY: The release call of a file owns the private data. + let private = unsafe { (*file).private_data }; + // SAFETY: Ioctl calls can borrow the private data of the file. + let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) }; + // SAFETY: + // * The file is valid for the duration of this call. + // * There is no active fdget_pos region on the file on this thread. + let file = unsafe { File::from_raw_file(file) }; + // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which + // this method is called. + let m = unsafe { SeqFile::from_raw(seq_file) }; + + T::show_fdinfo(device, m, file); +} diff --git a/rust/kernel/of.rs b/rust/kernel/of.rs new file mode 100644 index 000000000000..04f2d8ef29cb --- /dev/null +++ b/rust/kernel/of.rs @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Device Tree / Open Firmware abstractions. + +use crate::{bindings, device_id::RawDeviceId, prelude::*}; + +/// IdTable type for OF drivers. +pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>; + +/// An open firmware device id. +#[repr(transparent)] +#[derive(Clone, Copy)] +pub struct DeviceId(bindings::of_device_id); + +// SAFETY: +// * `DeviceId` is a `#[repr(transparent)` wrapper of `struct of_device_id` and does not add +// additional invariants, so it's safe to transmute to `RawType`. +// * `DRIVER_DATA_OFFSET` is the offset to the `data` field. +unsafe impl RawDeviceId for DeviceId { + type RawType = bindings::of_device_id; + + const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::of_device_id, data); + + fn index(&self) -> usize { + self.0.data as _ + } +} + +impl DeviceId { + /// Create a new device id from an OF 'compatible' string. + pub const fn new(compatible: &'static CStr) -> Self { + let src = compatible.as_bytes_with_nul(); + // Replace with `bindings::of_device_id::default()` once stabilized for `const`. + // SAFETY: FFI type is valid to be zero-initialized. + let mut of: bindings::of_device_id = unsafe { core::mem::zeroed() }; + + // TODO: Use `clone_from_slice` once the corresponding types do match. + let mut i = 0; + while i < src.len() { + of.compatible[i] = src[i] as _; + i += 1; + } + + Self(of) + } +} + +/// Create an OF `IdTable` with an "alias" for modpost. +#[macro_export] +macro_rules! 
of_device_table { + ($table_name:ident, $module_table_name:ident, $id_info_type: ty, $table_data: expr) => { + const $table_name: $crate::device_id::IdArray< + $crate::of::DeviceId, + $id_info_type, + { $table_data.len() }, + > = $crate::device_id::IdArray::new($table_data); + + $crate::module_device_table!("of", $module_table_name, $table_name); + }; +} diff --git a/rust/kernel/pci.rs b/rust/kernel/pci.rs new file mode 100644 index 000000000000..4c98b5b9aa1e --- /dev/null +++ b/rust/kernel/pci.rs @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Abstractions for the PCI bus. +//! +//! C header: [`include/linux/pci.h`](srctree/include/linux/pci.h) + +use crate::{ + alloc::flags::*, + bindings, container_of, device, + device_id::RawDeviceId, + devres::Devres, + driver, + error::{to_result, Result}, + io::Io, + io::IoRaw, + str::CStr, + types::{ARef, ForeignOwnable, Opaque}, + ThisModule, +}; +use core::{ops::Deref, ptr::addr_of_mut}; +use kernel::prelude::*; + +/// An adapter for the registration of PCI drivers. +pub struct Adapter<T: Driver>(T); + +// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if +// a preceding call to `register` has been successful. +unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> { + type RegType = bindings::pci_driver; + + unsafe fn register( + pdrv: &Opaque<Self::RegType>, + name: &'static CStr, + module: &'static ThisModule, + ) -> Result { + // SAFETY: It's safe to set the fields of `struct pci_driver` on initialization. + unsafe { + (*pdrv.get()).name = name.as_char_ptr(); + (*pdrv.get()).probe = Some(Self::probe_callback); + (*pdrv.get()).remove = Some(Self::remove_callback); + (*pdrv.get()).id_table = T::ID_TABLE.as_ptr(); + } + + // SAFETY: `pdrv` is guaranteed to be a valid `RegType`. + to_result(unsafe { + bindings::__pci_register_driver(pdrv.get(), module.0, name.as_char_ptr()) + }) + } + + unsafe fn unregister(pdrv: &Opaque<Self::RegType>) { + // SAFETY: `pdrv` is guaranteed to be a valid `RegType`. + unsafe { bindings::pci_unregister_driver(pdrv.get()) } + } +} + +impl<T: Driver + 'static> Adapter<T> { + extern "C" fn probe_callback( + pdev: *mut bindings::pci_dev, + id: *const bindings::pci_device_id, + ) -> kernel::ffi::c_int { + // SAFETY: The PCI bus only ever calls the probe callback with a valid pointer to a + // `struct pci_dev`. + let dev = unsafe { device::Device::get_device(addr_of_mut!((*pdev).dev)) }; + // SAFETY: `dev` is guaranteed to be embedded in a valid `struct pci_dev` by the call + // above. + let mut pdev = unsafe { Device::from_dev(dev) }; + + // SAFETY: `DeviceId` is a `#[repr(transparent)` wrapper of `struct pci_device_id` and + // does not add additional invariants, so it's safe to transmute. + let id = unsafe { &*id.cast::<DeviceId>() }; + let info = T::ID_TABLE.info(id.index()); + + match T::probe(&mut pdev, info) { + Ok(data) => { + // Let the `struct pci_dev` own a reference of the driver's private data. + // SAFETY: By the type invariant `pdev.as_raw` returns a valid pointer to a + // `struct pci_dev`. + unsafe { bindings::pci_set_drvdata(pdev.as_raw(), data.into_foreign() as _) }; + } + Err(err) => return Error::to_errno(err), + } + + 0 + } + + extern "C" fn remove_callback(pdev: *mut bindings::pci_dev) { + // SAFETY: The PCI bus only ever calls the remove callback with a valid pointer to a + // `struct pci_dev`. 
+ let ptr = unsafe { bindings::pci_get_drvdata(pdev) }; + + // SAFETY: `remove_callback` is only ever called after a successful call to + // `probe_callback`, hence it's guaranteed that `ptr` points to a valid and initialized + // `KBox<T>` pointer created through `KBox::into_foreign`. + let _ = unsafe { KBox::<T>::from_foreign(ptr) }; + } +} + +/// Declares a kernel module that exposes a single PCI driver. +/// +/// # Example +/// +///```ignore +/// kernel::module_pci_driver! { +/// type: MyDriver, +/// name: "Module name", +/// author: "Author name", +/// description: "Description", +/// license: "GPL v2", +/// } +///``` +#[macro_export] +macro_rules! module_pci_driver { +($($f:tt)*) => { + $crate::module_driver!(<T>, $crate::pci::Adapter<T>, { $($f)* }); +}; +} + +/// Abstraction for bindings::pci_device_id. +#[repr(transparent)] +#[derive(Clone, Copy)] +pub struct DeviceId(bindings::pci_device_id); + +impl DeviceId { + const PCI_ANY_ID: u32 = !0; + + /// Equivalent to C's `PCI_DEVICE` macro. + /// + /// Create a new `pci::DeviceId` from a vendor and device ID number. + pub const fn from_id(vendor: u32, device: u32) -> Self { + Self(bindings::pci_device_id { + vendor, + device, + subvendor: DeviceId::PCI_ANY_ID, + subdevice: DeviceId::PCI_ANY_ID, + class: 0, + class_mask: 0, + driver_data: 0, + override_only: 0, + }) + } + + /// Equivalent to C's `PCI_DEVICE_CLASS` macro. + /// + /// Create a new `pci::DeviceId` from a class number and mask. + pub const fn from_class(class: u32, class_mask: u32) -> Self { + Self(bindings::pci_device_id { + vendor: DeviceId::PCI_ANY_ID, + device: DeviceId::PCI_ANY_ID, + subvendor: DeviceId::PCI_ANY_ID, + subdevice: DeviceId::PCI_ANY_ID, + class, + class_mask, + driver_data: 0, + override_only: 0, + }) + } +} + +// SAFETY: +// * `DeviceId` is a `#[repr(transparent)` wrapper of `pci_device_id` and does not add +// additional invariants, so it's safe to transmute to `RawType`. +// * `DRIVER_DATA_OFFSET` is the offset to the `driver_data` field. +unsafe impl RawDeviceId for DeviceId { + type RawType = bindings::pci_device_id; + + const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::pci_device_id, driver_data); + + fn index(&self) -> usize { + self.0.driver_data as _ + } +} + +/// IdTable type for PCI +pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>; + +/// Create a PCI `IdTable` with its alias for modpost. +#[macro_export] +macro_rules! pci_device_table { + ($table_name:ident, $module_table_name:ident, $id_info_type: ty, $table_data: expr) => { + const $table_name: $crate::device_id::IdArray< + $crate::pci::DeviceId, + $id_info_type, + { $table_data.len() }, + > = $crate::device_id::IdArray::new($table_data); + + $crate::module_device_table!("pci", $module_table_name, $table_name); + }; +} + +/// The PCI driver trait. +/// +/// # Example +/// +///``` +/// # use kernel::{bindings, pci}; +/// +/// struct MyDriver; +/// +/// kernel::pci_device_table!( +/// PCI_TABLE, +/// MODULE_PCI_TABLE, +/// <MyDriver as pci::Driver>::IdInfo, +/// [ +/// (pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as _), ()) +/// ] +/// ); +/// +/// impl pci::Driver for MyDriver { +/// type IdInfo = (); +/// const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE; +/// +/// fn probe( +/// _pdev: &mut pci::Device, +/// _id_info: &Self::IdInfo, +/// ) -> Result<Pin<KBox<Self>>> { +/// Err(ENODEV) +/// } +/// } +///``` +/// Drivers must implement this trait in order to get a PCI driver registered. 
Please refer to the
+/// `Adapter` documentation for an example.
+pub trait Driver {
+    /// The type holding information about each device id supported by the driver.
+    ///
+    /// TODO: Use associated_type_defaults once stabilized:
+    ///
+    /// type IdInfo: 'static = ();
+    type IdInfo: 'static;
+
+    /// The table of device ids supported by the driver.
+    const ID_TABLE: IdTable<Self::IdInfo>;
+
+    /// PCI driver probe.
+    ///
+    /// Called when a new PCI device is added or discovered.
+    /// Implementers should attempt to initialize the device here.
+    fn probe(dev: &mut Device, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
+}
+
+/// The PCI device representation.
+///
+/// A PCI device is based on an always reference counted `device::Device` instance. Cloning a PCI
+/// device, hence, also increments the base device's reference count.
+///
+/// # Invariants
+///
+/// `Device` holds a valid reference of `ARef<device::Device>` whose underlying `struct device` is
+/// a member of a `struct pci_dev`.
+#[derive(Clone)]
+pub struct Device(ARef<device::Device>);
+
+/// A PCI BAR to perform I/O operations on.
+///
+/// # Invariants
+///
+/// `Bar` always holds an `IoRaw` instance that holds a valid pointer to the start of the I/O
+/// memory mapped PCI bar and its size.
+pub struct Bar<const SIZE: usize = 0> {
+    pdev: Device,
+    io: IoRaw<SIZE>,
+    num: i32,
+}
+
+impl<const SIZE: usize> Bar<SIZE> {
+    fn new(pdev: Device, num: u32, name: &CStr) -> Result<Self> {
+        let len = pdev.resource_len(num)?;
+        if len == 0 {
+            return Err(ENOMEM);
+        }
+
+        // Convert to `i32`, since that's what all the C bindings use.
+        let num = i32::try_from(num)?;
+
+        // SAFETY:
+        // `pdev` is valid by the invariants of `Device`.
+        // `num` is checked for validity by a previous call to `Device::resource_len`.
+        // `name` is always valid.
+        let ret = unsafe { bindings::pci_request_region(pdev.as_raw(), num, name.as_char_ptr()) };
+        if ret != 0 {
+            return Err(EBUSY);
+        }
+
+        // SAFETY:
+        // `pdev` is valid by the invariants of `Device`.
+        // `num` is checked for validity by a previous call to `Device::resource_len`.
+        // `name` is always valid.
+        let ioptr: usize = unsafe { bindings::pci_iomap(pdev.as_raw(), num, 0) } as usize;
+        if ioptr == 0 {
+            // SAFETY:
+            // `pdev` valid by the invariants of `Device`.
+            // `num` is checked for validity by a previous call to `Device::resource_len`.
+            unsafe { bindings::pci_release_region(pdev.as_raw(), num) };
+            return Err(ENOMEM);
+        }
+
+        let io = match IoRaw::new(ioptr, len as usize) {
+            Ok(io) => io,
+            Err(err) => {
+                // SAFETY:
+                // `pdev` is valid by the invariants of `Device`.
+                // `ioptr` is guaranteed to be the start of a valid I/O mapped memory region.
+                // `num` is checked for validity by a previous call to `Device::resource_len`.
+                unsafe { Self::do_release(&pdev, ioptr, num) };
+                return Err(err);
+            }
+        };
+
+        Ok(Bar { pdev, io, num })
+    }
+
+    /// # Safety
+    ///
+    /// `ioptr` must be a valid pointer to the memory mapped PCI bar number `num`.
+    unsafe fn do_release(pdev: &Device, ioptr: usize, num: i32) {
+        // SAFETY:
+        // `pdev` is valid by the invariants of `Device`.
+        // `ioptr` is valid by the safety requirements.
+        // `num` is valid by the safety requirements.
+        unsafe {
+            bindings::pci_iounmap(pdev.as_raw(), ioptr as _);
+            bindings::pci_release_region(pdev.as_raw(), num);
+        }
+    }
+
+    fn release(&self) {
+        // SAFETY: The safety requirements are guaranteed by the type invariant of `self.pdev`.
+        unsafe { Self::do_release(&self.pdev, self.io.addr(), self.num) };
+    }
+}
+
+impl Bar {
+    fn index_is_valid(index: u32) -> bool {
+        // A `struct pci_dev` owns an array of resources with at most `PCI_NUM_RESOURCES` entries.
+        index < bindings::PCI_NUM_RESOURCES
+    }
+}
+
+impl<const SIZE: usize> Drop for Bar<SIZE> {
+    fn drop(&mut self) {
+        self.release();
+    }
+}
+
+impl<const SIZE: usize> Deref for Bar<SIZE> {
+    type Target = Io<SIZE>;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: By the type invariant of `Self`, the MMIO range in `self.io` is properly mapped.
+        unsafe { Io::from_raw(&self.io) }
+    }
+}
+
+impl Device {
+    /// Create a PCI Device instance from an existing `device::Device`.
+    ///
+    /// # Safety
+    ///
+    /// `dev` must be an `ARef<device::Device>` whose underlying `bindings::device` is a member of
+    /// a `bindings::pci_dev`.
+    pub unsafe fn from_dev(dev: ARef<device::Device>) -> Self {
+        Self(dev)
+    }
+
+    fn as_raw(&self) -> *mut bindings::pci_dev {
+        // SAFETY: By the type invariant `self.0.as_raw` is a pointer to the `struct device`
+        // embedded in `struct pci_dev`.
+        unsafe { container_of!(self.0.as_raw(), bindings::pci_dev, dev) as _ }
+    }
+
+    /// Returns the PCI vendor ID.
+    pub fn vendor_id(&self) -> u16 {
+        // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+        unsafe { (*self.as_raw()).vendor }
+    }
+
+    /// Returns the PCI device ID.
+    pub fn device_id(&self) -> u16 {
+        // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+        unsafe { (*self.as_raw()).device }
+    }
+
+    /// Enable memory resources for this device.
+    pub fn enable_device_mem(&self) -> Result {
+        // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
+        let ret = unsafe { bindings::pci_enable_device_mem(self.as_raw()) };
+        if ret != 0 {
+            Err(Error::from_errno(ret))
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Enable bus-mastering for this device.
+    pub fn set_master(&self) {
+        // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
+        unsafe { bindings::pci_set_master(self.as_raw()) };
+    }
+
+    /// Returns the size of the given PCI bar resource.
+    pub fn resource_len(&self, bar: u32) -> Result<bindings::resource_size_t> {
+        if !Bar::index_is_valid(bar) {
+            return Err(EINVAL);
+        }
+
+        // SAFETY:
+        // - `bar` is a valid bar number, as guaranteed by the above call to `Bar::index_is_valid`,
+        // - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`.
+        Ok(unsafe { bindings::pci_resource_len(self.as_raw(), bar.try_into()?) })
+    }
+
+    /// Maps an entire PCI BAR after performing a region request on it. I/O operation bounds
+    /// checks can be performed at compile time for offsets (plus the requested type size) < SIZE.
+    pub fn iomap_region_sized<const SIZE: usize>(
+        &self,
+        bar: u32,
+        name: &CStr,
+    ) -> Result<Devres<Bar<SIZE>>> {
+        let bar = Bar::<SIZE>::new(self.clone(), bar, name)?;
+        let devres = Devres::new(self.as_ref(), bar, GFP_KERNEL)?;
+
+        Ok(devres)
+    }
+
+    /// Maps an entire PCI BAR after performing a region request on it.
+    pub fn iomap_region(&self, bar: u32, name: &CStr) -> Result<Devres<Bar>> {
+        self.iomap_region_sized::<0>(bar, name)
+    }
+}
+
+impl AsRef<device::Device> for Device {
+    fn as_ref(&self) -> &device::Device {
+        &self.0
+    }
+}
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
new file mode 100644
index 000000000000..50e6b0421813
--- /dev/null
+++ b/rust/kernel/platform.rs
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! 
Abstractions for the platform bus. +//! +//! C header: [`include/linux/platform_device.h`](srctree/include/linux/platform_device.h) + +use crate::{ + bindings, container_of, device, driver, + error::{to_result, Result}, + of, + prelude::*, + str::CStr, + types::{ARef, ForeignOwnable, Opaque}, + ThisModule, +}; + +use core::ptr::addr_of_mut; + +/// An adapter for the registration of platform drivers. +pub struct Adapter<T: Driver>(T); + +// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if +// a preceding call to `register` has been successful. +unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> { + type RegType = bindings::platform_driver; + + unsafe fn register( + pdrv: &Opaque<Self::RegType>, + name: &'static CStr, + module: &'static ThisModule, + ) -> Result { + let of_table = match T::OF_ID_TABLE { + Some(table) => table.as_ptr(), + None => core::ptr::null(), + }; + + // SAFETY: It's safe to set the fields of `struct platform_driver` on initialization. + unsafe { + (*pdrv.get()).driver.name = name.as_char_ptr(); + (*pdrv.get()).probe = Some(Self::probe_callback); + (*pdrv.get()).remove = Some(Self::remove_callback); + (*pdrv.get()).driver.of_match_table = of_table; + } + + // SAFETY: `pdrv` is guaranteed to be a valid `RegType`. + to_result(unsafe { bindings::__platform_driver_register(pdrv.get(), module.0) }) + } + + unsafe fn unregister(pdrv: &Opaque<Self::RegType>) { + // SAFETY: `pdrv` is guaranteed to be a valid `RegType`. + unsafe { bindings::platform_driver_unregister(pdrv.get()) }; + } +} + +impl<T: Driver + 'static> Adapter<T> { + extern "C" fn probe_callback(pdev: *mut bindings::platform_device) -> kernel::ffi::c_int { + // SAFETY: The platform bus only ever calls the probe callback with a valid `pdev`. + let dev = unsafe { device::Device::get_device(addr_of_mut!((*pdev).dev)) }; + // SAFETY: `dev` is guaranteed to be embedded in a valid `struct platform_device` by the + // call above. + let mut pdev = unsafe { Device::from_dev(dev) }; + + let info = <Self as driver::Adapter>::id_info(pdev.as_ref()); + match T::probe(&mut pdev, info) { + Ok(data) => { + // Let the `struct platform_device` own a reference of the driver's private data. + // SAFETY: By the type invariant `pdev.as_raw` returns a valid pointer to a + // `struct platform_device`. + unsafe { bindings::platform_set_drvdata(pdev.as_raw(), data.into_foreign() as _) }; + } + Err(err) => return Error::to_errno(err), + } + + 0 + } + + extern "C" fn remove_callback(pdev: *mut bindings::platform_device) { + // SAFETY: `pdev` is a valid pointer to a `struct platform_device`. + let ptr = unsafe { bindings::platform_get_drvdata(pdev) }; + + // SAFETY: `remove_callback` is only ever called after a successful call to + // `probe_callback`, hence it's guaranteed that `ptr` points to a valid and initialized + // `KBox<T>` pointer created through `KBox::into_foreign`. + let _ = unsafe { KBox::<T>::from_foreign(ptr) }; + } +} + +impl<T: Driver + 'static> driver::Adapter for Adapter<T> { + type IdInfo = T::IdInfo; + + fn of_id_table() -> Option<of::IdTable<Self::IdInfo>> { + T::OF_ID_TABLE + } +} + +/// Declares a kernel module that exposes a single platform driver. +/// +/// # Examples +/// +/// ```ignore +/// kernel::module_platform_driver! { +/// type: MyDriver, +/// name: "Module name", +/// author: "Author name", +/// description: "Description", +/// license: "GPL v2", +/// } +/// ``` +#[macro_export] +macro_rules! 
module_platform_driver {
+    ($($f:tt)*) => {
+        $crate::module_driver!(<T>, $crate::platform::Adapter<T>, { $($f)* });
+    };
+}
+
+/// The platform driver trait.
+///
+/// Drivers must implement this trait in order to get a platform driver registered.
+///
+/// # Example
+///
+///```
+/// # use kernel::{bindings, c_str, of, platform};
+///
+/// struct MyDriver;
+///
+/// kernel::of_device_table!(
+///     OF_TABLE,
+///     MODULE_OF_TABLE,
+///     <MyDriver as platform::Driver>::IdInfo,
+///     [
+///         (of::DeviceId::new(c_str!("test,device")), ())
+///     ]
+/// );
+///
+/// impl platform::Driver for MyDriver {
+///     type IdInfo = ();
+///     const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+///
+///     fn probe(
+///         _pdev: &mut platform::Device,
+///         _id_info: Option<&Self::IdInfo>,
+///     ) -> Result<Pin<KBox<Self>>> {
+///         Err(ENODEV)
+///     }
+/// }
+///```
+pub trait Driver {
+    /// The type holding driver private data about each device id supported by the driver.
+    ///
+    /// TODO: Use associated_type_defaults once stabilized:
+    ///
+    /// type IdInfo: 'static = ();
+    type IdInfo: 'static;
+
+    /// The table of OF device ids supported by the driver.
+    const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>>;
+
+    /// Platform driver probe.
+    ///
+    /// Called when a new platform device is added or discovered.
+    /// Implementers should attempt to initialize the device here.
+    fn probe(dev: &mut Device, id_info: Option<&Self::IdInfo>) -> Result<Pin<KBox<Self>>>;
+}
+
+/// The platform device representation.
+///
+/// A platform device is based on an always reference counted `device::Device` instance. Cloning a
+/// platform device, hence, also increments the base device's reference count.
+///
+/// # Invariants
+///
+/// `Device` holds a valid reference of `ARef<device::Device>` whose underlying `struct device` is a
+/// member of a `struct platform_device`.
+#[derive(Clone)]
+pub struct Device(ARef<device::Device>);
+
+impl Device {
+    /// Convert a raw kernel device into a `Device`.
+    ///
+    /// # Safety
+    ///
+    /// `dev` must be an `ARef<device::Device>` whose underlying `bindings::device` is a member of a
+    /// `bindings::platform_device`.
+    unsafe fn from_dev(dev: ARef<device::Device>) -> Self {
+        Self(dev)
+    }
+
+    fn as_raw(&self) -> *mut bindings::platform_device {
+        // SAFETY: By the type invariant `self.0.as_raw` is a pointer to the `struct device`
+        // embedded in `struct platform_device`.
+        unsafe { container_of!(self.0.as_raw(), bindings::platform_device, dev) }.cast_mut()
+    }
+}
+
+impl AsRef<device::Device> for Device {
+    fn as_ref(&self) -> &device::Device {
+        &self.0
+    }
+}
diff --git a/rust/kernel/revocable.rs b/rust/kernel/revocable.rs
new file mode 100644
index 000000000000..1e5a9d25c21b
--- /dev/null
+++ b/rust/kernel/revocable.rs
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Revocable objects.
+//!
+//! The [`Revocable`] type wraps other types and allows access to them to be revoked. The existence
+//! of a [`RevocableGuard`] ensures that objects remain valid.
+
+use crate::{bindings, prelude::*, sync::rcu, types::Opaque};
+use core::{
+    marker::PhantomData,
+    ops::Deref,
+    ptr::drop_in_place,
+    sync::atomic::{AtomicBool, Ordering},
+};
+
+/// An object that can become inaccessible at runtime.
+///
+/// Once access is revoked and all concurrent users complete (i.e., all existing instances of
+/// [`RevocableGuard`] are dropped), the wrapped object is also dropped.
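+///
+/// Internally this combines an `AtomicBool` flag with RCU: readers take the RCU read side
+/// lock and check the flag, while revocation clears the flag and (in the synchronous case)
+/// waits for a grace period before dropping the wrapped object.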
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::revocable::Revocable;
+///
+/// struct Example {
+///     a: u32,
+///     b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
+///     let guard = v.try_access()?;
+///     Some(guard.a + guard.b)
+/// }
+///
+/// let v = KBox::pin_init(Revocable::new(Example { a: 10, b: 20 }), GFP_KERNEL).unwrap();
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+///
+/// Same example as above, but explicitly using the RCU read side lock.
+///
+/// ```
+/// # use kernel::revocable::Revocable;
+/// use kernel::sync::rcu;
+///
+/// struct Example {
+///     a: u32,
+///     b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
+///     let guard = rcu::read_lock();
+///     let e = v.try_access_with_guard(&guard)?;
+///     Some(e.a + e.b)
+/// }
+///
+/// let v = KBox::pin_init(Revocable::new(Example { a: 10, b: 20 }), GFP_KERNEL).unwrap();
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+#[pin_data(PinnedDrop)]
+pub struct Revocable<T> {
+    is_available: AtomicBool,
+    #[pin]
+    data: Opaque<T>,
+}
+
+// SAFETY: `Revocable` is `Send` if the wrapped object is also `Send`. This is because while the
+// functionality exposed by `Revocable` can be accessed from any thread/CPU, it is possible that
+// this isn't supported by the wrapped object.
+unsafe impl<T: Send> Send for Revocable<T> {}
+
+// SAFETY: `Revocable` is `Sync` if the wrapped object is both `Send` and `Sync`. We require `Send`
+// from the wrapped object as well because of `Revocable::revoke`, which can trigger the `Drop`
+// implementation of the wrapped object from an arbitrary thread.
+unsafe impl<T: Sync + Send> Sync for Revocable<T> {}
+
+impl<T> Revocable<T> {
+    /// Creates a new revocable instance of the given data.
+    pub fn new(data: impl PinInit<T>) -> impl PinInit<Self> {
+        pin_init!(Self {
+            is_available: AtomicBool::new(true),
+            data <- Opaque::pin_init(data),
+        })
+    }
+
+    /// Tries to access the revocable wrapped object.
+    ///
+    /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+    ///
+    /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+    /// remain accessible while the guard is alive. In such cases, callers are not allowed to sleep
+    /// because another CPU may be waiting to complete the revocation of this object.
+    pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
+        let guard = rcu::read_lock();
+        if self.is_available.load(Ordering::Relaxed) {
+            // Since `self.is_available` is true, data is initialised and has to remain valid
+            // because the RCU read side lock prevents it from being dropped.
+            Some(RevocableGuard::new(self.data.get(), guard))
+        } else {
+            None
+        }
+    }
+
+    /// Tries to access the revocable wrapped object.
+    ///
+    /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+    ///
+    /// Returns a shared reference to the object otherwise; the object is guaranteed to
+    /// remain accessible while the RCU read side guard is alive. In such cases, callers are not
+    /// allowed to sleep because another CPU may be waiting to complete the revocation of this
+    /// object.
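+    ///
+    /// Unlike [`Revocable::try_access`], this does not acquire the RCU read side lock itself,
+    /// but reuses the caller's [`rcu::Guard`]; this can be useful when accessing multiple
+    /// revocable objects under a single RCU read side lock.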
+    pub fn try_access_with_guard<'a>(&'a self, _guard: &'a rcu::Guard) -> Option<&'a T> {
+        if self.is_available.load(Ordering::Relaxed) {
+            // SAFETY: Since `self.is_available` is true, data is initialised and has to remain
+            // valid because the RCU read side lock prevents it from being dropped.
+            Some(unsafe { &*self.data.get() })
+        } else {
+            None
+        }
+    }
+
+    /// # Safety
+    ///
+    /// Callers must ensure that there are no more concurrent users of the revocable object.
+    unsafe fn revoke_internal<const SYNC: bool>(&self) {
+        if self.is_available.swap(false, Ordering::Relaxed) {
+            if SYNC {
+                // SAFETY: Just an FFI call, there are no further requirements.
+                unsafe { bindings::synchronize_rcu() };
+            }
+
+            // SAFETY: We know `self.data` is valid because only one CPU can succeed the `swap`
+            // above, which takes `is_available` from `true` to `false`.
+            unsafe { drop_in_place(self.data.get()) };
+        }
+    }
+
+    /// Revokes access to and drops the wrapped object.
+    ///
+    /// Access to the object is revoked immediately to new callers of [`Revocable::try_access`],
+    /// expecting that there are no concurrent users of the object.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that there are no more concurrent users of the revocable object.
+    pub unsafe fn revoke_nosync(&self) {
+        // SAFETY: By the safety requirement of this function, the caller ensures that nobody is
+        // accessing the data anymore and hence we don't have to wait for the grace period to
+        // finish.
+        unsafe { self.revoke_internal::<false>() }
+    }
+
+    /// Revokes access to and drops the wrapped object.
+    ///
+    /// Access to the object is revoked immediately to new callers of [`Revocable::try_access`].
+    ///
+    /// If there are concurrent users of the object (i.e., ones that called
+    /// [`Revocable::try_access`] beforehand and still haven't dropped the returned guard), this
+    /// function waits for the concurrent access to complete before dropping the wrapped object.
+    pub fn revoke(&self) {
+        // SAFETY: By passing `true` we ask `revoke_internal` to wait for the grace period to
+        // finish.
+        unsafe { self.revoke_internal::<true>() }
+    }
+}
+
+#[pinned_drop]
+impl<T> PinnedDrop for Revocable<T> {
+    fn drop(self: Pin<&mut Self>) {
+        // Drop only if the data hasn't been revoked yet (in which case it has already been
+        // dropped).
+        // SAFETY: We are not moving out of `p`, only dropping in place.
+        let p = unsafe { self.get_unchecked_mut() };
+        if *p.is_available.get_mut() {
+            // SAFETY: We know `self.data` is valid because no other CPU has changed
+            // `is_available` to `false` yet, and no other CPU can do it anymore because this CPU
+            // holds the only reference (mutable) to `self` now.
+            unsafe { drop_in_place(p.data.get()) };
+        }
+    }
+}
+
+/// A guard that allows access to a revocable object and keeps it alive.
+///
+/// CPUs may not sleep while holding on to [`RevocableGuard`] because it's in atomic context
+/// holding the RCU read-side lock.
+///
+/// # Invariants
+///
+/// The RCU read-side lock is held while the guard is alive.
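+///
+/// Dropping the guard releases the RCU read-side lock, which allows a concurrent
+/// [`Revocable::revoke`] to proceed with dropping the wrapped object.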
+pub struct RevocableGuard<'a, T> { + data_ref: *const T, + _rcu_guard: rcu::Guard, + _p: PhantomData<&'a ()>, +} + +impl<T> RevocableGuard<'_, T> { + fn new(data_ref: *const T, rcu_guard: rcu::Guard) -> Self { + Self { + data_ref, + _rcu_guard: rcu_guard, + _p: PhantomData, + } + } +} + +impl<T> Deref for RevocableGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + // SAFETY: By the type invariants, we hold the rcu read-side lock, so the object is + // guaranteed to remain valid. + unsafe { &*self.data_ref } + } +} diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs index dffdaad972ce..3498fb344dc9 100644 --- a/rust/kernel/sync.rs +++ b/rust/kernel/sync.rs @@ -12,6 +12,7 @@ mod condvar; pub mod lock; mod locked_by; pub mod poll; +pub mod rcu; pub use arc::{Arc, ArcBorrow, UniqueArc}; pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult}; diff --git a/rust/kernel/sync/rcu.rs b/rust/kernel/sync/rcu.rs new file mode 100644 index 000000000000..b51d9150ffe2 --- /dev/null +++ b/rust/kernel/sync/rcu.rs @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! RCU support. +//! +//! C header: [`include/linux/rcupdate.h`](srctree/include/linux/rcupdate.h) + +use crate::{bindings, types::NotThreadSafe}; + +/// Evidence that the RCU read side lock is held on the current thread/CPU. +/// +/// The type is explicitly not `Send` because this property is per-thread/CPU. +/// +/// # Invariants +/// +/// The RCU read side lock is actually held while instances of this guard exist. +pub struct Guard(NotThreadSafe); + +impl Guard { + /// Acquires the RCU read side lock and returns a guard. + pub fn new() -> Self { + // SAFETY: An FFI call with no additional requirements. + unsafe { bindings::rcu_read_lock() }; + // INVARIANT: The RCU read side lock was just acquired above. + Self(NotThreadSafe) + } + + /// Explicitly releases the RCU read side lock. + pub fn unlock(self) {} +} + +impl Default for Guard { + fn default() -> Self { + Self::new() + } +} + +impl Drop for Guard { + fn drop(&mut self) { + // SAFETY: By the type invariants, the RCU read side is locked, so it is ok to unlock it. + unsafe { bindings::rcu_read_unlock() }; + } +} + +/// Acquires the RCU read side lock. +pub fn read_lock() -> Guard { + Guard::new() +} diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs index 0dfaf45a755c..2bbaab83b9d6 100644 --- a/rust/kernel/types.rs +++ b/rust/kernel/types.rs @@ -326,6 +326,17 @@ impl<T> Opaque<T> { } } + /// Create an opaque pin-initializer from the given pin-initializer. + pub fn pin_init(slot: impl PinInit<T>) -> impl PinInit<Self> { + Self::ffi_init(|ptr: *mut T| { + // SAFETY: + // - `ptr` is a valid pointer to uninitialized memory, + // - `slot` is not accessed on error; the call is infallible, + // - `slot` is pinned in memory. + let _ = unsafe { init::PinInit::<T>::__pinned_init(slot, ptr) }; + }) + } + /// Creates a pin-initializer from the given initializer closure. /// /// The returned initializer calls the given closure with the pointer to the inner `T` of this diff --git a/rust/macros/module.rs b/rust/macros/module.rs index 2587f41b0d39..cdf94f4982df 100644 --- a/rust/macros/module.rs +++ b/rust/macros/module.rs @@ -228,6 +228,10 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream { kernel::ThisModule::from_ptr(core::ptr::null_mut()) }}; + impl kernel::ModuleMetadata for {type_} {{ + const NAME: &'static kernel::str::CStr = kernel::c_str!(\"{name}\"); + }} + // Double nested modules, since then nobody can access the public items inside. 
            mod __module_init {{
                mod __module_init {{
diff --git a/samples/rust/Kconfig b/samples/rust/Kconfig
index b0f74a81c8f9..918dbead2c0b 100644
--- a/samples/rust/Kconfig
+++ b/samples/rust/Kconfig
@@ -20,6 +20,16 @@ config SAMPLE_RUST_MINIMAL
 	  If unsure, say N.
 
+config SAMPLE_RUST_MISC_DEVICE
+	tristate "Misc device"
+	help
+	  This option builds the Rust misc device.
+
+	  To compile this as a module, choose M here:
+	  the module will be called rust_misc_device.
+
+	  If unsure, say N.
+
 config SAMPLE_RUST_PRINT
 	tristate "Printing macros"
 	help
@@ -30,6 +40,27 @@ config SAMPLE_RUST_PRINT
 	  If unsure, say N.
 
+config SAMPLE_RUST_DRIVER_PCI
+	tristate "PCI Driver"
+	depends on PCI
+	help
+	  This option builds the Rust PCI driver sample.
+
+	  To compile this as a module, choose M here:
+	  the module will be called rust_driver_pci.
+
+	  If unsure, say N.
+
+config SAMPLE_RUST_DRIVER_PLATFORM
+	tristate "Platform Driver"
+	help
+	  This option builds the Rust Platform driver sample.
+
+	  To compile this as a module, choose M here:
+	  the module will be called rust_driver_platform.
+
+	  If unsure, say N.
+
 config SAMPLE_RUST_HOSTPROGS
 	bool "Host programs"
 	help
diff --git a/samples/rust/Makefile b/samples/rust/Makefile
index c1a5c1655395..5a8ab0df0567 100644
--- a/samples/rust/Makefile
+++ b/samples/rust/Makefile
@@ -2,7 +2,10 @@ ccflags-y += -I$(src)				# needed for trace events
 
 obj-$(CONFIG_SAMPLE_RUST_MINIMAL)		+= rust_minimal.o
+obj-$(CONFIG_SAMPLE_RUST_MISC_DEVICE)		+= rust_misc_device.o
 obj-$(CONFIG_SAMPLE_RUST_PRINT)			+= rust_print.o
+obj-$(CONFIG_SAMPLE_RUST_DRIVER_PCI)		+= rust_driver_pci.o
+obj-$(CONFIG_SAMPLE_RUST_DRIVER_PLATFORM)	+= rust_driver_platform.o
 
 rust_print-y := rust_print_main.o rust_print_events.o
diff --git a/samples/rust/rust_driver_pci.rs b/samples/rust/rust_driver_pci.rs
new file mode 100644
index 000000000000..1fb6e44f3395
--- /dev/null
+++ b/samples/rust/rust_driver_pci.rs
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust PCI driver sample (based on QEMU's `pci-testdev`).
+//!
+//! To make this driver probe, QEMU must be run with `-device pci-testdev`.
+
+use kernel::{bindings, c_str, devres::Devres, pci, prelude::*};
+
+struct Regs;
+
+impl Regs {
+    const TEST: usize = 0x0;
+    const OFFSET: usize = 0x4;
+    const DATA: usize = 0x8;
+    const COUNT: usize = 0xC;
+    const END: usize = 0x10;
+}
+
+type Bar0 = pci::Bar<{ Regs::END }>;
+
+#[derive(Debug)]
+struct TestIndex(u8);
+
+impl TestIndex {
+    const NO_EVENTFD: Self = Self(0);
+}
+
+struct SampleDriver {
+    pdev: pci::Device,
+    bar: Devres<Bar0>,
+}
+
+kernel::pci_device_table!(
+    PCI_TABLE,
+    MODULE_PCI_TABLE,
+    <SampleDriver as pci::Driver>::IdInfo,
+    [(
+        pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, 0x5),
+        TestIndex::NO_EVENTFD
+    )]
+);
+
+impl SampleDriver {
+    fn testdev(index: &TestIndex, bar: &Bar0) -> Result<u32> {
+        // Select the test.
+        bar.writeb(index.0, Regs::TEST);
+
+        let offset = u32::from_le(bar.readl(Regs::OFFSET)) as usize;
+        let data = bar.readb(Regs::DATA);
+
+        // Write `data` to `offset` to increase `count` by one.
+        //
+        // Note that we need `try_writeb`, since `offset` can't be checked at compile-time.
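+        // If `offset` were out of bounds, `try_writeb` would return `EINVAL` at runtime
+        // rather than failing the build.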
+ bar.try_writeb(data, offset)?; + + Ok(bar.readl(Regs::COUNT)) + } +} + +impl pci::Driver for SampleDriver { + type IdInfo = TestIndex; + + const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE; + + fn probe(pdev: &mut pci::Device, info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> { + dev_dbg!( + pdev.as_ref(), + "Probe Rust PCI driver sample (PCI ID: 0x{:x}, 0x{:x}).\n", + pdev.vendor_id(), + pdev.device_id() + ); + + pdev.enable_device_mem()?; + pdev.set_master(); + + let bar = pdev.iomap_region_sized::<{ Regs::END }>(0, c_str!("rust_driver_pci"))?; + + let drvdata = KBox::new( + Self { + pdev: pdev.clone(), + bar, + }, + GFP_KERNEL, + )?; + + let bar = drvdata.bar.try_access().ok_or(ENXIO)?; + + dev_info!( + pdev.as_ref(), + "pci-testdev data-match count: {}\n", + Self::testdev(info, &bar)? + ); + + Ok(drvdata.into()) + } +} + +impl Drop for SampleDriver { + fn drop(&mut self) { + dev_dbg!(self.pdev.as_ref(), "Remove Rust PCI driver sample.\n"); + } +} + +kernel::module_pci_driver! { + type: SampleDriver, + name: "rust_driver_pci", + author: "Danilo Krummrich", + description: "Rust PCI driver", + license: "GPL v2", +} diff --git a/samples/rust/rust_driver_platform.rs b/samples/rust/rust_driver_platform.rs new file mode 100644 index 000000000000..8120609e2940 --- /dev/null +++ b/samples/rust/rust_driver_platform.rs @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Rust Platform driver sample. + +use kernel::{c_str, of, platform, prelude::*}; + +struct SampleDriver { + pdev: platform::Device, +} + +struct Info(u32); + +kernel::of_device_table!( + OF_TABLE, + MODULE_OF_TABLE, + <SampleDriver as platform::Driver>::IdInfo, + [(of::DeviceId::new(c_str!("test,rust-device")), Info(42))] +); + +impl platform::Driver for SampleDriver { + type IdInfo = Info; + const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE); + + fn probe(pdev: &mut platform::Device, info: Option<&Self::IdInfo>) -> Result<Pin<KBox<Self>>> { + dev_dbg!(pdev.as_ref(), "Probe Rust Platform driver sample.\n"); + + if let Some(info) = info { + dev_info!(pdev.as_ref(), "Probed with info: '{}'.\n", info.0); + } + + let drvdata = KBox::new(Self { pdev: pdev.clone() }, GFP_KERNEL)?; + + Ok(drvdata.into()) + } +} + +impl Drop for SampleDriver { + fn drop(&mut self) { + dev_dbg!(self.pdev.as_ref(), "Remove Rust Platform driver sample.\n"); + } +} + +kernel::module_platform_driver! { + type: SampleDriver, + name: "rust_driver_platform", + author: "Danilo Krummrich", + description: "Rust Platform driver", + license: "GPL v2", +} diff --git a/samples/rust/rust_misc_device.rs b/samples/rust/rust_misc_device.rs new file mode 100644 index 000000000000..40ad7266c225 --- /dev/null +++ b/samples/rust/rust_misc_device.rs @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! Rust misc device sample. + +/// Below is an example userspace C program that exercises this sample's functionality. 
+/// +/// ```c +/// #include <stdio.h> +/// #include <stdlib.h> +/// #include <errno.h> +/// #include <fcntl.h> +/// #include <unistd.h> +/// #include <sys/ioctl.h> +/// +/// #define RUST_MISC_DEV_FAIL _IO('|', 0) +/// #define RUST_MISC_DEV_HELLO _IO('|', 0x80) +/// #define RUST_MISC_DEV_GET_VALUE _IOR('|', 0x81, int) +/// #define RUST_MISC_DEV_SET_VALUE _IOW('|', 0x82, int) +/// +/// int main() { +/// int value, new_value; +/// int fd, ret; +/// +/// // Open the device file +/// printf("Opening /dev/rust-misc-device for reading and writing\n"); +/// fd = open("/dev/rust-misc-device", O_RDWR); +/// if (fd < 0) { +/// perror("open"); +/// return errno; +/// } +/// +/// // Make call into driver to say "hello" +/// printf("Calling Hello\n"); +/// ret = ioctl(fd, RUST_MISC_DEV_HELLO, NULL); +/// if (ret < 0) { +/// perror("ioctl: Failed to call into Hello"); +/// close(fd); +/// return errno; +/// } +/// +/// // Get initial value +/// printf("Fetching initial value\n"); +/// ret = ioctl(fd, RUST_MISC_DEV_GET_VALUE, &value); +/// if (ret < 0) { +/// perror("ioctl: Failed to fetch the initial value"); +/// close(fd); +/// return errno; +/// } +/// +/// value++; +/// +/// // Set value to something different +/// printf("Submitting new value (%d)\n", value); +/// ret = ioctl(fd, RUST_MISC_DEV_SET_VALUE, &value); +/// if (ret < 0) { +/// perror("ioctl: Failed to submit new value"); +/// close(fd); +/// return errno; +/// } +/// +/// // Ensure new value was applied +/// printf("Fetching new value\n"); +/// ret = ioctl(fd, RUST_MISC_DEV_GET_VALUE, &new_value); +/// if (ret < 0) { +/// perror("ioctl: Failed to fetch the new value"); +/// close(fd); +/// return errno; +/// } +/// +/// if (value != new_value) { +/// printf("Failed: Committed and retrieved values are different (%d - %d)\n", value, new_value); +/// close(fd); +/// return -1; +/// } +/// +/// // Call the unsuccessful ioctl +/// printf("Attempting to call into a non-existent IOCTL\n"); +/// ret = ioctl(fd, RUST_MISC_DEV_FAIL, NULL); +/// if (ret < 0) { +/// perror("ioctl: Succeeded to fail - this was expected"); +/// } else { +/// printf("ioctl: Failed to fail\n"); +/// close(fd); +/// return -1; +/// } +/// +/// // Close the device file +/// printf("Closing /dev/rust-misc-device\n"); +/// close(fd); +/// +/// printf("Success\n"); +/// return 0; +/// } +/// ``` +use core::pin::Pin; + +use kernel::{ + c_str, + device::Device, + fs::File, + ioctl::{_IO, _IOC_SIZE, _IOR, _IOW}, + miscdevice::{MiscDevice, MiscDeviceOptions, MiscDeviceRegistration}, + new_mutex, + prelude::*, + sync::Mutex, + types::ARef, + uaccess::{UserSlice, UserSliceReader, UserSliceWriter}, +}; + +const RUST_MISC_DEV_HELLO: u32 = _IO('|' as u32, 0x80); +const RUST_MISC_DEV_GET_VALUE: u32 = _IOR::<i32>('|' as u32, 0x81); +const RUST_MISC_DEV_SET_VALUE: u32 = _IOW::<i32>('|' as u32, 0x82); + +module!
{ + type: RustMiscDeviceModule, + name: "rust_misc_device", + author: "Lee Jones", + description: "Rust misc device sample", + license: "GPL", +} + +#[pin_data] +struct RustMiscDeviceModule { + #[pin] + _miscdev: MiscDeviceRegistration<RustMiscDevice>, +} + +impl kernel::InPlaceModule for RustMiscDeviceModule { + fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> { + pr_info!("Initialising Rust Misc Device Sample\n"); + + let options = MiscDeviceOptions { + name: c_str!("rust-misc-device"), + }; + + try_pin_init!(Self { + _miscdev <- MiscDeviceRegistration::register(options), + }) + } +} + +struct Inner { + value: i32, +} + +#[pin_data(PinnedDrop)] +struct RustMiscDevice { + #[pin] + inner: Mutex<Inner>, + dev: ARef<Device>, +} + +#[vtable] +impl MiscDevice for RustMiscDevice { + type Ptr = Pin<KBox<Self>>; + + fn open(_file: &File, misc: &MiscDeviceRegistration<Self>) -> Result<Pin<KBox<Self>>> { + let dev = ARef::from(misc.device()); + + dev_info!(dev, "Opening Rust Misc Device Sample\n"); + + KBox::try_pin_init( + try_pin_init! { + RustMiscDevice { + inner <- new_mutex!(Inner { value: 0_i32 }), + dev: dev, + } + }, + GFP_KERNEL, + ) + } + + fn ioctl(me: Pin<&RustMiscDevice>, _file: &File, cmd: u32, arg: usize) -> Result<isize> { + dev_info!(me.dev, "IOCTLing Rust Misc Device Sample\n"); + + let size = _IOC_SIZE(cmd); + + match cmd { + RUST_MISC_DEV_GET_VALUE => me.get_value(UserSlice::new(arg, size).writer())?, + RUST_MISC_DEV_SET_VALUE => me.set_value(UserSlice::new(arg, size).reader())?, + RUST_MISC_DEV_HELLO => me.hello()?, + _ => { + dev_err!(me.dev, "-> IOCTL not recognised: {}\n", cmd); + return Err(ENOTTY); + } + }; + + Ok(0) + } +} + +#[pinned_drop] +impl PinnedDrop for RustMiscDevice { + fn drop(self: Pin<&mut Self>) { + dev_info!(self.dev, "Exiting the Rust Misc Device Sample\n"); + } +} + +impl RustMiscDevice { + fn set_value(&self, mut reader: UserSliceReader) -> Result<isize> { + let new_value = reader.read::<i32>()?; + let mut guard = self.inner.lock(); + + dev_info!( + self.dev, + "-> Copying data from userspace (value: {})\n", + new_value + ); + + guard.value = new_value; + Ok(0) + } + + fn get_value(&self, mut writer: UserSliceWriter) -> Result<isize> { + let guard = self.inner.lock(); + let value = guard.value; + + // Free up the lock and use our locally cached instance from here + drop(guard); + + dev_info!( + self.dev, + "-> Copying data to userspace (value: {})\n", + &value + ); + + writer.write::<i32>(&value)?; + Ok(0) + } + + fn hello(&self) -> Result<isize> { + dev_info!(self.dev, "-> Hello from the Rust Misc Device\n"); + + Ok(0) + } +} diff --git a/sound/soc/sof/sof-client-ipc-flood-test.c b/sound/soc/sof/sof-client-ipc-flood-test.c index b35c98896968..11b6f7da2882 100644 --- a/sound/soc/sof/sof-client-ipc-flood-test.c +++ b/sound/soc/sof/sof-client-ipc-flood-test.c @@ -158,7 +158,6 @@ static ssize_t sof_ipc_flood_dfs_write(struct file *file, const char __user *buf unsigned long ipc_duration_ms = 0; bool flood_duration_test = false; unsigned long ipc_count = 0; - struct dentry *dentry; int err; char *string; int ret; @@ -182,14 +181,7 @@ static ssize_t sof_ipc_flood_dfs_write(struct file *file, const char __user *buf * ipc_duration_ms test floods the DSP for the time specified * in the debugfs entry.
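The sof-client-ipc-flood-test hunks around this point replace matching on the dentry name with the debugfs "aux number" API: a small integer is stored with the file at creation time and read back in the handler. A minimal sketch of that pattern, using the same calls as the hunks below (the `my_*` names are illustrative):

    #include <linux/debugfs.h>
    #include <linux/fs.h>

    static ssize_t my_dfs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
    {
            /* 0 or 1, exactly as passed to debugfs_create_file_aux_num() */
            bool flood_duration_test = debugfs_get_aux_num(file);

            if (flood_duration_test) {
                    /* ... run the duration-based test ... */
            } else {
                    /* ... run the count-based test ... */
            }

            return count;
    }

    static const struct file_operations my_fops = {
            .open = simple_open,
            .write = my_dfs_write,
    };

    static void my_register(struct dentry *root, void *priv)
    {
            /* One fops serves both files; the aux number tells them apart. */
            debugfs_create_file_aux_num("ipc_flood_count", 0644, root, priv,
                                        0, &my_fops);
            debugfs_create_file_aux_num("ipc_flood_duration_ms", 0644, root,
                                        priv, 1, &my_fops);
    }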
*/ - dentry = file->f_path.dentry; - if (strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_COUNT) && - strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_DURATION)) { - ret = -EINVAL; - goto out; - } - - if (!strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_DURATION)) + if (debugfs_get_aux_num(file)) flood_duration_test = true; /* test completion criterion */ @@ -252,22 +244,15 @@ static ssize_t sof_ipc_flood_dfs_read(struct file *file, char __user *buffer, struct sof_ipc_flood_priv *priv = cdev->data; size_t size_ret; - struct dentry *dentry; + if (*ppos) + return 0; - dentry = file->f_path.dentry; - if (!strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_COUNT) || - !strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_DURATION)) { - if (*ppos) - return 0; + count = min_t(size_t, count, strlen(priv->buf)); + size_ret = copy_to_user(buffer, priv->buf, count); + if (size_ret) + return -EFAULT; - count = min_t(size_t, count, strlen(priv->buf)); - size_ret = copy_to_user(buffer, priv->buf, count); - if (size_ret) - return -EFAULT; - - *ppos += count; - return count; - } + *ppos += count; return count; } @@ -320,12 +305,12 @@ static int sof_ipc_flood_probe(struct auxiliary_device *auxdev, priv->dfs_root = debugfs_create_dir(dev_name(dev), debugfs_root); if (!IS_ERR_OR_NULL(priv->dfs_root)) { /* create read-write ipc_flood_count debugfs entry */ - debugfs_create_file(DEBUGFS_IPC_FLOOD_COUNT, 0644, priv->dfs_root, - cdev, &sof_ipc_flood_fops); + debugfs_create_file_aux_num(DEBUGFS_IPC_FLOOD_COUNT, 0644, + priv->dfs_root, cdev, 0, &sof_ipc_flood_fops); /* create read-write ipc_flood_duration_ms debugfs entry */ - debugfs_create_file(DEBUGFS_IPC_FLOOD_DURATION, 0644, - priv->dfs_root, cdev, &sof_ipc_flood_fops); + debugfs_create_file_aux_num(DEBUGFS_IPC_FLOOD_DURATION, 0644, + priv->dfs_root, cdev, 1, &sof_ipc_flood_fops); if (auxdev->id == 0) { /* diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h index cd8420e8c3ad..150416682e2c 100644 --- a/tools/arch/arm64/include/asm/sysreg.h +++ b/tools/arch/arm64/include/asm/sysreg.h @@ -11,6 +11,7 @@ #include <linux/bits.h> #include <linux/stringify.h> +#include <linux/kasan-tags.h> #include <asm/gpr-num.h> @@ -108,6 +109,9 @@ #define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x)) #define set_pstate_dit(x) asm volatile(SET_PSTATE_DIT(x)) +/* Register-based PAN access, for save/restore purposes */ +#define SYS_PSTATE_PAN sys_reg(3, 0, 4, 2, 3) + #define __SYS_BARRIER_INSN(CRm, op2, Rt) \ __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) @@ -123,6 +127,37 @@ #define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4) #define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6) +#define SYS_IC_IALLUIS sys_insn(1, 0, 7, 1, 0) +#define SYS_IC_IALLU sys_insn(1, 0, 7, 5, 0) +#define SYS_IC_IVAU sys_insn(1, 3, 7, 5, 1) + +#define SYS_DC_IVAC sys_insn(1, 0, 7, 6, 1) +#define SYS_DC_IGVAC sys_insn(1, 0, 7, 6, 3) +#define SYS_DC_IGDVAC sys_insn(1, 0, 7, 6, 5) + +#define SYS_DC_CVAC sys_insn(1, 3, 7, 10, 1) +#define SYS_DC_CGVAC sys_insn(1, 3, 7, 10, 3) +#define SYS_DC_CGDVAC sys_insn(1, 3, 7, 10, 5) + +#define SYS_DC_CVAU sys_insn(1, 3, 7, 11, 1) + +#define SYS_DC_CVAP sys_insn(1, 3, 7, 12, 1) +#define SYS_DC_CGVAP sys_insn(1, 3, 7, 12, 3) +#define SYS_DC_CGDVAP sys_insn(1, 3, 7, 12, 5) + +#define SYS_DC_CVADP sys_insn(1, 3, 7, 13, 1) +#define SYS_DC_CGVADP sys_insn(1, 3, 7, 13, 3) +#define SYS_DC_CGDVADP sys_insn(1, 3, 7, 13, 5) + +#define SYS_DC_CIVAC sys_insn(1, 3, 7, 14, 1) +#define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3) +#define SYS_DC_CIGDVAC 
sys_insn(1, 3, 7, 14, 5) + +/* Data cache zero operations */ +#define SYS_DC_ZVA sys_insn(1, 3, 7, 4, 1) +#define SYS_DC_GVA sys_insn(1, 3, 7, 4, 3) +#define SYS_DC_GZVA sys_insn(1, 3, 7, 4, 4) + /* * Automatically generated definitions for system registers, the * manual encodings below are in the process of being converted to @@ -162,6 +197,84 @@ #define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0) #define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0) +#define SYS_BRBINF_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0)) +#define SYS_BRBINFINJ_EL1 sys_reg(2, 1, 9, 1, 0) +#define SYS_BRBSRC_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1)) +#define SYS_BRBSRCINJ_EL1 sys_reg(2, 1, 9, 1, 1) +#define SYS_BRBTGT_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2)) +#define SYS_BRBTGTINJ_EL1 sys_reg(2, 1, 9, 1, 2) +#define SYS_BRBTS_EL1 sys_reg(2, 1, 9, 0, 2) + +#define SYS_BRBCR_EL1 sys_reg(2, 1, 9, 0, 0) +#define SYS_BRBFCR_EL1 sys_reg(2, 1, 9, 0, 1) +#define SYS_BRBIDR0_EL1 sys_reg(2, 1, 9, 2, 0) + +#define SYS_TRCITECR_EL1 sys_reg(3, 0, 1, 2, 3) +#define SYS_TRCACATR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3))) +#define SYS_TRCACVR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (0 | (m >> 3))) +#define SYS_TRCAUTHSTATUS sys_reg(2, 1, 7, 14, 6) +#define SYS_TRCAUXCTLR sys_reg(2, 1, 0, 6, 0) +#define SYS_TRCBBCTLR sys_reg(2, 1, 0, 15, 0) +#define SYS_TRCCCCTLR sys_reg(2, 1, 0, 14, 0) +#define SYS_TRCCIDCCTLR0 sys_reg(2, 1, 3, 0, 2) +#define SYS_TRCCIDCCTLR1 sys_reg(2, 1, 3, 1, 2) +#define SYS_TRCCIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 0) +#define SYS_TRCCLAIMCLR sys_reg(2, 1, 7, 9, 6) +#define SYS_TRCCLAIMSET sys_reg(2, 1, 7, 8, 6) +#define SYS_TRCCNTCTLR(m) sys_reg(2, 1, 0, (4 | (m & 3)), 5) +#define SYS_TRCCNTRLDVR(m) sys_reg(2, 1, 0, (0 | (m & 3)), 5) +#define SYS_TRCCNTVR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 5) +#define SYS_TRCCONFIGR sys_reg(2, 1, 0, 4, 0) +#define SYS_TRCDEVARCH sys_reg(2, 1, 7, 15, 6) +#define SYS_TRCDEVID sys_reg(2, 1, 7, 2, 7) +#define SYS_TRCEVENTCTL0R sys_reg(2, 1, 0, 8, 0) +#define SYS_TRCEVENTCTL1R sys_reg(2, 1, 0, 9, 0) +#define SYS_TRCEXTINSELR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 4) +#define SYS_TRCIDR0 sys_reg(2, 1, 0, 8, 7) +#define SYS_TRCIDR10 sys_reg(2, 1, 0, 2, 6) +#define SYS_TRCIDR11 sys_reg(2, 1, 0, 3, 6) +#define SYS_TRCIDR12 sys_reg(2, 1, 0, 4, 6) +#define SYS_TRCIDR13 sys_reg(2, 1, 0, 5, 6) +#define SYS_TRCIDR1 sys_reg(2, 1, 0, 9, 7) +#define SYS_TRCIDR2 sys_reg(2, 1, 0, 10, 7) +#define SYS_TRCIDR3 sys_reg(2, 1, 0, 11, 7) +#define SYS_TRCIDR4 sys_reg(2, 1, 0, 12, 7) +#define SYS_TRCIDR5 sys_reg(2, 1, 0, 13, 7) +#define SYS_TRCIDR6 sys_reg(2, 1, 0, 14, 7) +#define SYS_TRCIDR7 sys_reg(2, 1, 0, 15, 7) +#define SYS_TRCIDR8 sys_reg(2, 1, 0, 0, 6) +#define SYS_TRCIDR9 sys_reg(2, 1, 0, 1, 6) +#define SYS_TRCIMSPEC(m) sys_reg(2, 1, 0, (m & 7), 7) +#define SYS_TRCITEEDCR sys_reg(2, 1, 0, 2, 1) +#define SYS_TRCOSLSR sys_reg(2, 1, 1, 1, 4) +#define SYS_TRCPRGCTLR sys_reg(2, 1, 0, 1, 0) +#define SYS_TRCQCTLR sys_reg(2, 1, 0, 1, 1) +#define SYS_TRCRSCTLR(m) sys_reg(2, 1, 1, (m & 15), (0 | (m >> 4))) +#define SYS_TRCRSR sys_reg(2, 1, 0, 10, 0) +#define SYS_TRCSEQEVR(m) sys_reg(2, 1, 0, (m & 3), 4) +#define SYS_TRCSEQRSTEVR sys_reg(2, 1, 0, 6, 4) +#define SYS_TRCSEQSTR sys_reg(2, 1, 0, 7, 4) +#define SYS_TRCSSCCR(m) sys_reg(2, 1, 1, (m & 7), 2) +#define SYS_TRCSSCSR(m) sys_reg(2, 1, 1, (8 | (m & 7)), 2) +#define SYS_TRCSSPCICR(m) sys_reg(2, 1, 1, (m & 7), 3) +#define SYS_TRCSTALLCTLR sys_reg(2, 1, 0, 11, 0) +#define SYS_TRCSTATR sys_reg(2, 1, 0, 3, 0) +#define 
SYS_TRCSYNCPR sys_reg(2, 1, 0, 13, 0) +#define SYS_TRCTRACEIDR sys_reg(2, 1, 0, 0, 1) +#define SYS_TRCTSCTLR sys_reg(2, 1, 0, 12, 0) +#define SYS_TRCVICTLR sys_reg(2, 1, 0, 0, 2) +#define SYS_TRCVIIECTLR sys_reg(2, 1, 0, 1, 2) +#define SYS_TRCVIPCSSCTLR sys_reg(2, 1, 0, 3, 2) +#define SYS_TRCVISSCTLR sys_reg(2, 1, 0, 2, 2) +#define SYS_TRCVMIDCCTLR0 sys_reg(2, 1, 3, 2, 2) +#define SYS_TRCVMIDCCTLR1 sys_reg(2, 1, 3, 3, 2) +#define SYS_TRCVMIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 1) + +/* ETM */ +#define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4) + +#define SYS_BRBCR_EL2 sys_reg(2, 4, 9, 0, 0) + #define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0) #define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5) #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) @@ -170,8 +283,6 @@ #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) -#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) - #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) #define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) @@ -202,15 +313,38 @@ #define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1) #define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2) #define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3) +#define SYS_ERXPFGF_EL1 sys_reg(3, 0, 5, 4, 4) +#define SYS_ERXPFGCTL_EL1 sys_reg(3, 0, 5, 4, 5) +#define SYS_ERXPFGCDN_EL1 sys_reg(3, 0, 5, 4, 6) #define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0) #define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1) +#define SYS_ERXMISC2_EL1 sys_reg(3, 0, 5, 5, 2) +#define SYS_ERXMISC3_EL1 sys_reg(3, 0, 5, 5, 3) #define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0) #define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) #define SYS_PAR_EL1_F BIT(0) +/* When PAR_EL1.F == 1 */ #define SYS_PAR_EL1_FST GENMASK(6, 1) +#define SYS_PAR_EL1_PTW BIT(8) +#define SYS_PAR_EL1_S BIT(9) +#define SYS_PAR_EL1_AssuredOnly BIT(12) +#define SYS_PAR_EL1_TopLevel BIT(13) +#define SYS_PAR_EL1_Overlay BIT(14) +#define SYS_PAR_EL1_DirtyBit BIT(15) +#define SYS_PAR_EL1_F1_IMPDEF GENMASK_ULL(63, 48) +#define SYS_PAR_EL1_F1_RES0 (BIT(7) | BIT(10) | GENMASK_ULL(47, 16)) +#define SYS_PAR_EL1_RES1 BIT(11) +/* When PAR_EL1.F == 0 */ +#define SYS_PAR_EL1_SH GENMASK_ULL(8, 7) +#define SYS_PAR_EL1_NS BIT(9) +#define SYS_PAR_EL1_F0_IMPDEF BIT(10) +#define SYS_PAR_EL1_NSE BIT(11) +#define SYS_PAR_EL1_PA GENMASK_ULL(51, 12) +#define SYS_PAR_EL1_ATTR GENMASK_ULL(63, 56) +#define SYS_PAR_EL1_F0_RES0 (GENMASK_ULL(6, 1) | GENMASK_ULL(55, 52)) /*** Statistical Profiling Extension ***/ #define PMSEVFR_EL1_RES0_IMP \ @@ -274,6 +408,8 @@ #define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6) #define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) +#define SYS_ACCDATA_EL1 sys_reg(3, 0, 13, 0, 5) + #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) @@ -286,7 +422,6 @@ #define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) #define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3) #define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4) -#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5) #define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6) #define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7) #define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0) @@ -369,6 +504,7 @@ #define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0) #define SYS_ACTLR_EL2 sys_reg(3, 4, 1, 0, 1) +#define SYS_SCTLR2_EL2 sys_reg(3, 4, 1, 0, 3) #define SYS_HCR_EL2 sys_reg(3, 4, 1, 1, 0) #define SYS_MDCR_EL2 sys_reg(3, 4, 1, 1, 1) #define SYS_CPTR_EL2 sys_reg(3, 4, 1, 1, 2) @@ -381,13 +517,15 @@ #define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0) #define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) -#define SYS_TRFCR_EL2 sys_reg(3, 4, 
1, 2, 1) -#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) -#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) +#define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) #define SYS_SP_EL1 sys_reg(3, 4, 4, 1, 0) +#define SYS_SPSR_irq sys_reg(3, 4, 4, 3, 0) +#define SYS_SPSR_abt sys_reg(3, 4, 4, 3, 1) +#define SYS_SPSR_und sys_reg(3, 4, 4, 3, 2) +#define SYS_SPSR_fiq sys_reg(3, 4, 4, 3, 3) #define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1) #define SYS_AFSR0_EL2 sys_reg(3, 4, 5, 1, 0) #define SYS_AFSR1_EL2 sys_reg(3, 4, 5, 1, 1) @@ -449,24 +587,49 @@ #define SYS_CONTEXTIDR_EL2 sys_reg(3, 4, 13, 0, 1) #define SYS_TPIDR_EL2 sys_reg(3, 4, 13, 0, 2) +#define SYS_SCXTNUM_EL2 sys_reg(3, 4, 13, 0, 7) + +#define __AMEV_op2(m) (m & 0x7) +#define __AMEV_CRm(n, m) (n | ((m & 0x8) >> 3)) +#define __SYS__AMEVCNTVOFF0n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0x8, m), __AMEV_op2(m)) +#define SYS_AMEVCNTVOFF0n_EL2(m) __SYS__AMEVCNTVOFF0n_EL2(m) +#define __SYS__AMEVCNTVOFF1n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0xA, m), __AMEV_op2(m)) +#define SYS_AMEVCNTVOFF1n_EL2(m) __SYS__AMEVCNTVOFF1n_EL2(m) #define SYS_CNTVOFF_EL2 sys_reg(3, 4, 14, 0, 3) #define SYS_CNTHCTL_EL2 sys_reg(3, 4, 14, 1, 0) +#define SYS_CNTHP_TVAL_EL2 sys_reg(3, 4, 14, 2, 0) +#define SYS_CNTHP_CTL_EL2 sys_reg(3, 4, 14, 2, 1) +#define SYS_CNTHP_CVAL_EL2 sys_reg(3, 4, 14, 2, 2) +#define SYS_CNTHV_TVAL_EL2 sys_reg(3, 4, 14, 3, 0) +#define SYS_CNTHV_CTL_EL2 sys_reg(3, 4, 14, 3, 1) +#define SYS_CNTHV_CVAL_EL2 sys_reg(3, 4, 14, 3, 2) /* VHE encodings for architectural EL0/1 system registers */ +#define SYS_BRBCR_EL12 sys_reg(2, 5, 9, 0, 0) #define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) +#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) +#define SYS_SCTLR2_EL12 sys_reg(3, 5, 1, 0, 3) +#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) +#define SYS_TRFCR_EL12 sys_reg(3, 5, 1, 2, 1) +#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6) #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) #define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) #define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) +#define SYS_TCR2_EL12 sys_reg(3, 5, 2, 0, 3) #define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0) #define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1) #define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0) #define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) #define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) +#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) +#define SYS_PMSCR_EL12 sys_reg(3, 5, 9, 9, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) +#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1) +#define SYS_SCXTNUM_EL12 sys_reg(3, 5, 13, 0, 7) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) #define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1) @@ -477,6 +640,183 @@ #define SYS_SP_EL2 sys_reg(3, 6, 4, 1, 0) +/* AT instructions */ +#define AT_Op0 1 +#define AT_CRn 7 + +#define OP_AT_S1E1R sys_insn(AT_Op0, 0, AT_CRn, 8, 0) +#define OP_AT_S1E1W sys_insn(AT_Op0, 0, AT_CRn, 8, 1) +#define OP_AT_S1E0R sys_insn(AT_Op0, 0, AT_CRn, 8, 2) +#define OP_AT_S1E0W sys_insn(AT_Op0, 0, AT_CRn, 8, 3) +#define OP_AT_S1E1RP sys_insn(AT_Op0, 0, AT_CRn, 9, 0) +#define OP_AT_S1E1WP sys_insn(AT_Op0, 0, AT_CRn, 9, 1) +#define OP_AT_S1E1A sys_insn(AT_Op0, 0, AT_CRn, 9, 2) +#define OP_AT_S1E2R sys_insn(AT_Op0, 4, AT_CRn, 8, 0) +#define OP_AT_S1E2W sys_insn(AT_Op0, 4, AT_CRn, 8, 1) 
+#define OP_AT_S12E1R sys_insn(AT_Op0, 4, AT_CRn, 8, 4) +#define OP_AT_S12E1W sys_insn(AT_Op0, 4, AT_CRn, 8, 5) +#define OP_AT_S12E0R sys_insn(AT_Op0, 4, AT_CRn, 8, 6) +#define OP_AT_S12E0W sys_insn(AT_Op0, 4, AT_CRn, 8, 7) +#define OP_AT_S1E2A sys_insn(AT_Op0, 4, AT_CRn, 9, 2) + +/* TLBI instructions */ +#define TLBI_Op0 1 + +#define TLBI_Op1_EL1 0 /* Accessible from EL1 or higher */ +#define TLBI_Op1_EL2 4 /* Accessible from EL2 or higher */ + +#define TLBI_CRn_XS 8 /* Extra Slow (the common one) */ +#define TLBI_CRn_nXS 9 /* not Extra Slow (which nobody uses) */ + +#define TLBI_CRm_IPAIS 0 /* S2 Inner-Shareable */ +#define TLBI_CRm_nROS 1 /* non-Range, Outer-Shareable */ +#define TLBI_CRm_RIS 2 /* Range, Inner-Shareable */ +#define TLBI_CRm_nRIS 3 /* non-Range, Inner-Shareable */ +#define TLBI_CRm_IPAONS 4 /* S2 Outer and Non-Shareable */ +#define TLBI_CRm_ROS 5 /* Range, Outer-Shareable */ +#define TLBI_CRm_RNS 6 /* Range, Non-Shareable */ +#define TLBI_CRm_nRNS 7 /* non-Range, Non-Shareable */ + +#define OP_TLBI_VMALLE1OS sys_insn(1, 0, 8, 1, 0) +#define OP_TLBI_VAE1OS sys_insn(1, 0, 8, 1, 1) +#define OP_TLBI_ASIDE1OS sys_insn(1, 0, 8, 1, 2) +#define OP_TLBI_VAAE1OS sys_insn(1, 0, 8, 1, 3) +#define OP_TLBI_VALE1OS sys_insn(1, 0, 8, 1, 5) +#define OP_TLBI_VAALE1OS sys_insn(1, 0, 8, 1, 7) +#define OP_TLBI_RVAE1IS sys_insn(1, 0, 8, 2, 1) +#define OP_TLBI_RVAAE1IS sys_insn(1, 0, 8, 2, 3) +#define OP_TLBI_RVALE1IS sys_insn(1, 0, 8, 2, 5) +#define OP_TLBI_RVAALE1IS sys_insn(1, 0, 8, 2, 7) +#define OP_TLBI_VMALLE1IS sys_insn(1, 0, 8, 3, 0) +#define OP_TLBI_VAE1IS sys_insn(1, 0, 8, 3, 1) +#define OP_TLBI_ASIDE1IS sys_insn(1, 0, 8, 3, 2) +#define OP_TLBI_VAAE1IS sys_insn(1, 0, 8, 3, 3) +#define OP_TLBI_VALE1IS sys_insn(1, 0, 8, 3, 5) +#define OP_TLBI_VAALE1IS sys_insn(1, 0, 8, 3, 7) +#define OP_TLBI_RVAE1OS sys_insn(1, 0, 8, 5, 1) +#define OP_TLBI_RVAAE1OS sys_insn(1, 0, 8, 5, 3) +#define OP_TLBI_RVALE1OS sys_insn(1, 0, 8, 5, 5) +#define OP_TLBI_RVAALE1OS sys_insn(1, 0, 8, 5, 7) +#define OP_TLBI_RVAE1 sys_insn(1, 0, 8, 6, 1) +#define OP_TLBI_RVAAE1 sys_insn(1, 0, 8, 6, 3) +#define OP_TLBI_RVALE1 sys_insn(1, 0, 8, 6, 5) +#define OP_TLBI_RVAALE1 sys_insn(1, 0, 8, 6, 7) +#define OP_TLBI_VMALLE1 sys_insn(1, 0, 8, 7, 0) +#define OP_TLBI_VAE1 sys_insn(1, 0, 8, 7, 1) +#define OP_TLBI_ASIDE1 sys_insn(1, 0, 8, 7, 2) +#define OP_TLBI_VAAE1 sys_insn(1, 0, 8, 7, 3) +#define OP_TLBI_VALE1 sys_insn(1, 0, 8, 7, 5) +#define OP_TLBI_VAALE1 sys_insn(1, 0, 8, 7, 7) +#define OP_TLBI_VMALLE1OSNXS sys_insn(1, 0, 9, 1, 0) +#define OP_TLBI_VAE1OSNXS sys_insn(1, 0, 9, 1, 1) +#define OP_TLBI_ASIDE1OSNXS sys_insn(1, 0, 9, 1, 2) +#define OP_TLBI_VAAE1OSNXS sys_insn(1, 0, 9, 1, 3) +#define OP_TLBI_VALE1OSNXS sys_insn(1, 0, 9, 1, 5) +#define OP_TLBI_VAALE1OSNXS sys_insn(1, 0, 9, 1, 7) +#define OP_TLBI_RVAE1ISNXS sys_insn(1, 0, 9, 2, 1) +#define OP_TLBI_RVAAE1ISNXS sys_insn(1, 0, 9, 2, 3) +#define OP_TLBI_RVALE1ISNXS sys_insn(1, 0, 9, 2, 5) +#define OP_TLBI_RVAALE1ISNXS sys_insn(1, 0, 9, 2, 7) +#define OP_TLBI_VMALLE1ISNXS sys_insn(1, 0, 9, 3, 0) +#define OP_TLBI_VAE1ISNXS sys_insn(1, 0, 9, 3, 1) +#define OP_TLBI_ASIDE1ISNXS sys_insn(1, 0, 9, 3, 2) +#define OP_TLBI_VAAE1ISNXS sys_insn(1, 0, 9, 3, 3) +#define OP_TLBI_VALE1ISNXS sys_insn(1, 0, 9, 3, 5) +#define OP_TLBI_VAALE1ISNXS sys_insn(1, 0, 9, 3, 7) +#define OP_TLBI_RVAE1OSNXS sys_insn(1, 0, 9, 5, 1) +#define OP_TLBI_RVAAE1OSNXS sys_insn(1, 0, 9, 5, 3) +#define OP_TLBI_RVALE1OSNXS sys_insn(1, 0, 9, 5, 5) +#define OP_TLBI_RVAALE1OSNXS sys_insn(1, 0, 9, 5, 7) +#define
OP_TLBI_RVAE1NXS sys_insn(1, 0, 9, 6, 1) +#define OP_TLBI_RVAAE1NXS sys_insn(1, 0, 9, 6, 3) +#define OP_TLBI_RVALE1NXS sys_insn(1, 0, 9, 6, 5) +#define OP_TLBI_RVAALE1NXS sys_insn(1, 0, 9, 6, 7) +#define OP_TLBI_VMALLE1NXS sys_insn(1, 0, 9, 7, 0) +#define OP_TLBI_VAE1NXS sys_insn(1, 0, 9, 7, 1) +#define OP_TLBI_ASIDE1NXS sys_insn(1, 0, 9, 7, 2) +#define OP_TLBI_VAAE1NXS sys_insn(1, 0, 9, 7, 3) +#define OP_TLBI_VALE1NXS sys_insn(1, 0, 9, 7, 5) +#define OP_TLBI_VAALE1NXS sys_insn(1, 0, 9, 7, 7) +#define OP_TLBI_IPAS2E1IS sys_insn(1, 4, 8, 0, 1) +#define OP_TLBI_RIPAS2E1IS sys_insn(1, 4, 8, 0, 2) +#define OP_TLBI_IPAS2LE1IS sys_insn(1, 4, 8, 0, 5) +#define OP_TLBI_RIPAS2LE1IS sys_insn(1, 4, 8, 0, 6) +#define OP_TLBI_ALLE2OS sys_insn(1, 4, 8, 1, 0) +#define OP_TLBI_VAE2OS sys_insn(1, 4, 8, 1, 1) +#define OP_TLBI_ALLE1OS sys_insn(1, 4, 8, 1, 4) +#define OP_TLBI_VALE2OS sys_insn(1, 4, 8, 1, 5) +#define OP_TLBI_VMALLS12E1OS sys_insn(1, 4, 8, 1, 6) +#define OP_TLBI_RVAE2IS sys_insn(1, 4, 8, 2, 1) +#define OP_TLBI_RVALE2IS sys_insn(1, 4, 8, 2, 5) +#define OP_TLBI_ALLE2IS sys_insn(1, 4, 8, 3, 0) +#define OP_TLBI_VAE2IS sys_insn(1, 4, 8, 3, 1) +#define OP_TLBI_ALLE1IS sys_insn(1, 4, 8, 3, 4) +#define OP_TLBI_VALE2IS sys_insn(1, 4, 8, 3, 5) +#define OP_TLBI_VMALLS12E1IS sys_insn(1, 4, 8, 3, 6) +#define OP_TLBI_IPAS2E1OS sys_insn(1, 4, 8, 4, 0) +#define OP_TLBI_IPAS2E1 sys_insn(1, 4, 8, 4, 1) +#define OP_TLBI_RIPAS2E1 sys_insn(1, 4, 8, 4, 2) +#define OP_TLBI_RIPAS2E1OS sys_insn(1, 4, 8, 4, 3) +#define OP_TLBI_IPAS2LE1OS sys_insn(1, 4, 8, 4, 4) +#define OP_TLBI_IPAS2LE1 sys_insn(1, 4, 8, 4, 5) +#define OP_TLBI_RIPAS2LE1 sys_insn(1, 4, 8, 4, 6) +#define OP_TLBI_RIPAS2LE1OS sys_insn(1, 4, 8, 4, 7) +#define OP_TLBI_RVAE2OS sys_insn(1, 4, 8, 5, 1) +#define OP_TLBI_RVALE2OS sys_insn(1, 4, 8, 5, 5) +#define OP_TLBI_RVAE2 sys_insn(1, 4, 8, 6, 1) +#define OP_TLBI_RVALE2 sys_insn(1, 4, 8, 6, 5) +#define OP_TLBI_ALLE2 sys_insn(1, 4, 8, 7, 0) +#define OP_TLBI_VAE2 sys_insn(1, 4, 8, 7, 1) +#define OP_TLBI_ALLE1 sys_insn(1, 4, 8, 7, 4) +#define OP_TLBI_VALE2 sys_insn(1, 4, 8, 7, 5) +#define OP_TLBI_VMALLS12E1 sys_insn(1, 4, 8, 7, 6) +#define OP_TLBI_IPAS2E1ISNXS sys_insn(1, 4, 9, 0, 1) +#define OP_TLBI_RIPAS2E1ISNXS sys_insn(1, 4, 9, 0, 2) +#define OP_TLBI_IPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 5) +#define OP_TLBI_RIPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 6) +#define OP_TLBI_ALLE2OSNXS sys_insn(1, 4, 9, 1, 0) +#define OP_TLBI_VAE2OSNXS sys_insn(1, 4, 9, 1, 1) +#define OP_TLBI_ALLE1OSNXS sys_insn(1, 4, 9, 1, 4) +#define OP_TLBI_VALE2OSNXS sys_insn(1, 4, 9, 1, 5) +#define OP_TLBI_VMALLS12E1OSNXS sys_insn(1, 4, 9, 1, 6) +#define OP_TLBI_RVAE2ISNXS sys_insn(1, 4, 9, 2, 1) +#define OP_TLBI_RVALE2ISNXS sys_insn(1, 4, 9, 2, 5) +#define OP_TLBI_ALLE2ISNXS sys_insn(1, 4, 9, 3, 0) +#define OP_TLBI_VAE2ISNXS sys_insn(1, 4, 9, 3, 1) +#define OP_TLBI_ALLE1ISNXS sys_insn(1, 4, 9, 3, 4) +#define OP_TLBI_VALE2ISNXS sys_insn(1, 4, 9, 3, 5) +#define OP_TLBI_VMALLS12E1ISNXS sys_insn(1, 4, 9, 3, 6) +#define OP_TLBI_IPAS2E1OSNXS sys_insn(1, 4, 9, 4, 0) +#define OP_TLBI_IPAS2E1NXS sys_insn(1, 4, 9, 4, 1) +#define OP_TLBI_RIPAS2E1NXS sys_insn(1, 4, 9, 4, 2) +#define OP_TLBI_RIPAS2E1OSNXS sys_insn(1, 4, 9, 4, 3) +#define OP_TLBI_IPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 4) +#define OP_TLBI_IPAS2LE1NXS sys_insn(1, 4, 9, 4, 5) +#define OP_TLBI_RIPAS2LE1NXS sys_insn(1, 4, 9, 4, 6) +#define OP_TLBI_RIPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 7) +#define OP_TLBI_RVAE2OSNXS sys_insn(1, 4, 9, 5, 1) +#define OP_TLBI_RVALE2OSNXS sys_insn(1, 4, 9, 5, 5) +#define 
OP_TLBI_RVAE2NXS sys_insn(1, 4, 9, 6, 1) +#define OP_TLBI_RVALE2NXS sys_insn(1, 4, 9, 6, 5) +#define OP_TLBI_ALLE2NXS sys_insn(1, 4, 9, 7, 0) +#define OP_TLBI_VAE2NXS sys_insn(1, 4, 9, 7, 1) +#define OP_TLBI_ALLE1NXS sys_insn(1, 4, 9, 7, 4) +#define OP_TLBI_VALE2NXS sys_insn(1, 4, 9, 7, 5) +#define OP_TLBI_VMALLS12E1NXS sys_insn(1, 4, 9, 7, 6) + +/* Misc instructions */ +#define OP_GCSPUSHX sys_insn(1, 0, 7, 7, 4) +#define OP_GCSPOPCX sys_insn(1, 0, 7, 7, 5) +#define OP_GCSPOPX sys_insn(1, 0, 7, 7, 6) +#define OP_GCSPUSHM sys_insn(1, 3, 7, 7, 0) + +#define OP_BRB_IALL sys_insn(1, 1, 7, 2, 4) +#define OP_BRB_INJ sys_insn(1, 1, 7, 2, 5) +#define OP_CFP_RCTX sys_insn(1, 3, 7, 3, 4) +#define OP_DVP_RCTX sys_insn(1, 3, 7, 3, 5) +#define OP_COSP_RCTX sys_insn(1, 3, 7, 3, 6) +#define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7) + /* Common SCTLR_ELx flags. */ #define SCTLR_ELx_ENTP2 (BIT(60)) #define SCTLR_ELx_DSSBS (BIT(44)) @@ -555,16 +895,14 @@ /* Position the attr at the correct index */ #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) -/* id_aa64pfr0 */ -#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 -#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 - /* id_aa64mmfr0 */ #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_EL1_TGRAN4_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7 #define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0 #define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7 #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1 +#define ID_AA64MMFR0_EL1_TGRAN16_LPA2 ID_AA64MMFR0_EL1_TGRAN16_52_BIT #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf #define ARM64_MIN_PARANGE_BITS 32 @@ -572,6 +910,7 @@ #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7 #ifdef CONFIG_ARM64_PA_BITS_52 @@ -582,11 +921,13 @@ #if defined(CONFIG_ARM64_4K_PAGES) #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX #define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT #elif defined(CONFIG_ARM64_16K_PAGES) #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN16_52_BIT #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX #define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT @@ -610,6 +951,19 @@ #define SYS_GCR_EL1_RRND (BIT(16)) #define SYS_GCR_EL1_EXCL_MASK 0xffffUL +#ifdef CONFIG_KASAN_HW_TAGS +/* + * KASAN always uses a whole byte for its tags. With CONFIG_KASAN_HW_TAGS it + * only uses tags in the range 0xF0-0xFF, which we map to MTE tags 0x0-0xF. 
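+ *
+ * For example, with KASAN_TAG_MIN = 0xF0 and KASAN_TAG_MAX = 0xFD (see
+ * tools/include/linux/kasan-tags.h below), __MTE_TAG_MIN = 0x0 and
+ * __MTE_TAG_MAX = 0xD, so __MTE_TAG_INCL = GENMASK(13, 0) = 0x3fff and
+ * KERNEL_GCR_EL1_EXCL = 0xffff & ~0x3fff = 0xc000; only MTE tags 0xE
+ * (KASAN_TAG_INVALID) and 0xF (KASAN_TAG_KERNEL) stay excluded from
+ * random tag generation.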
+ */ +#define __MTE_TAG_MIN (KASAN_TAG_MIN & 0xf) +#define __MTE_TAG_MAX (KASAN_TAG_MAX & 0xf) +#define __MTE_TAG_INCL GENMASK(__MTE_TAG_MAX, __MTE_TAG_MIN) +#define KERNEL_GCR_EL1_EXCL (SYS_GCR_EL1_EXCL_MASK & ~__MTE_TAG_INCL) +#else +#define KERNEL_GCR_EL1_EXCL SYS_GCR_EL1_EXCL_MASK +#endif + #define KERNEL_GCR_EL1 (SYS_GCR_EL1_RRND | KERNEL_GCR_EL1_EXCL) /* RGSR_EL1 Definitions */ @@ -626,15 +980,6 @@ /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ #define SYS_MPIDR_SAFE_VAL (BIT(31)) -#define TRFCR_ELx_TS_SHIFT 5 -#define TRFCR_ELx_TS_MASK ((0x3UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_EL2_CX BIT(3) -#define TRFCR_ELx_ExTRE BIT(1) -#define TRFCR_ELx_E0TRE BIT(0) - /* GIC Hypervisor interface registers */ /* ICH_MISR_EL2 bit definitions */ #define ICH_MISR_EOI (1 << 0) @@ -716,6 +1061,22 @@ #define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4)) +/* + * Permission Overlay Extension (POE) permission encodings. + */ +#define POE_NONE UL(0x0) +#define POE_R UL(0x1) +#define POE_X UL(0x2) +#define POE_RX UL(0x3) +#define POE_W UL(0x4) +#define POE_RW UL(0x5) +#define POE_XW UL(0x6) +#define POE_RXW UL(0x7) +#define POE_MASK UL(0xf) + +/* Initial value for Permission Overlay Extension for EL0 */ +#define POR_EL0_INIT POE_RXW + #define ARM64_FEATURE_FIELD_BITS 4 /* Defined for compatibility only, do not add new users. */ @@ -789,15 +1150,21 @@ /* * For registers without architectural names, or simply unsupported by * GAS. + * + * __check_r forces warnings to be generated by the compiler when + * evaluating r, which wouldn't normally happen because r is only + * passed to the assembler via __stringify(r).
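+ *
+ * For example, an undefined or otherwise ill-formed encoding passed to
+ * read_sysreg_s()/write_sysreg_s() is now diagnosed when the C
+ * expression is evaluated, instead of surfacing only as an assembler
+ * error on the stringified text.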
*/ #define read_sysreg_s(r) ({ \ u64 __val; \ + u32 __maybe_unused __check_r = (u32)(r); \ asm volatile(__mrs_s("%0", r) : "=r" (__val)); \ __val; \ }) #define write_sysreg_s(v, r) do { \ u64 __val = (u64)(v); \ + u32 __maybe_unused __check_r = (u32)(r); \ asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \ } while (0) @@ -827,6 +1194,8 @@ par; \ }) +#define SYS_FIELD_VALUE(reg, field, val) reg##_##field##_##val + #define SYS_FIELD_GET(reg, field, val) \ FIELD_GET(reg##_##field##_MASK, val) @@ -834,7 +1203,8 @@ FIELD_PREP(reg##_##field##_MASK, val) #define SYS_FIELD_PREP_ENUM(reg, field, val) \ - FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val) + FIELD_PREP(reg##_##field##_MASK, \ + SYS_FIELD_VALUE(reg, field, val)) #endif diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c index 156b62a163c5..8a48cc2536f5 100644 --- a/tools/bootconfig/main.c +++ b/tools/bootconfig/main.c @@ -226,7 +226,7 @@ static int load_xbc_from_initrd(int fd, char **buf) /* Wrong Checksum */ rcsum = xbc_calc_checksum(*buf, size); if (csum != rcsum) { - pr_err("checksum error: %d != %d\n", csum, rcsum); + pr_err("checksum error: %u != %u\n", csum, rcsum); return -EINVAL; } @@ -395,7 +395,7 @@ static int apply_xbc(const char *path, const char *xbc_path) xbc_get_info(&ret, NULL); printf("\tNumber of nodes: %d\n", ret); printf("\tSize: %u bytes\n", (unsigned int)size); - printf("\tChecksum: %d\n", (unsigned int)csum); + printf("\tChecksum: %u\n", (unsigned int)csum); /* TODO: Check the options by schema */ xbc_exit(); diff --git a/tools/include/linux/kasan-tags.h b/tools/include/linux/kasan-tags.h new file mode 100644 index 000000000000..4f85f562512c --- /dev/null +++ b/tools/include/linux/kasan-tags.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KASAN_TAGS_H +#define _LINUX_KASAN_TAGS_H + +#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */ +#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */ +#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */ + +#ifdef CONFIG_KASAN_HW_TAGS +#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */ +#else +#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */ +#endif + +#endif /* _LINUX_KASAN_TAGS_H */ diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index d0337c11f9ee..cc8948f49117 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -725,7 +725,7 @@ static void default_mock_decoder(struct cxl_decoder *cxld) cxld->reset = mock_decoder_reset; } -static int first_decoder(struct device *dev, void *data) +static int first_decoder(struct device *dev, const void *data) { struct cxl_decoder *cxld; diff --git a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c index 447d61cae4db..cef8f7323ceb 100644 --- a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c @@ -147,7 +147,7 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu) val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val); - return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY; + return el0 == ID_AA64PFR0_EL1_EL0_IMP; } int main(void) diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c index 3dd85ce8551c..217541fe6536 100644 --- a/tools/testing/selftests/kvm/arm64/set_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c @@ -666,7 +666,7 @@ int
main(void) /* Check for AARCH64 only system */ val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val); - aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY); + aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP); ksft_print_header();
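For reference, a minimal sketch of how the reworked SYS_FIELD helpers and the renamed ID_AA64PFR0_EL1_EL0_IMP constant compose; the function and variable names are illustrative, while the register, field, and value names come from the generated arm64 sysreg definitions:

    #include <asm/sysreg.h>

    static bool aarch64_only_el0(u64 pfr0)
    {
            /* FIELD_GET(ID_AA64PFR0_EL1_EL0_MASK, pfr0) == ..._EL0_IMP */
            return SYS_FIELD_GET(ID_AA64PFR0_EL1, EL0, pfr0) ==
                   ID_AA64PFR0_EL1_EL0_IMP;
    }

    static u64 tgran4_52bit_field(void)
    {
            /*
             * SYS_FIELD_VALUE(reg, field, val) pastes reg##_##field##_##val,
             * so this is FIELD_PREP(ID_AA64MMFR0_EL1_TGRAN4_MASK,
             *                       ID_AA64MMFR0_EL1_TGRAN4_52_BIT).
             */
            return SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4, 52_BIT);
    }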