-rw-r--r--  .mailmap | 3
-rw-r--r--  Documentation/arch/powerpc/cxl.rst | 3
-rw-r--r--  Documentation/userspace-api/landlock.rst | 6
-rw-r--r--  MAINTAINERS | 26
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 26
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 2
-rw-r--r--  arch/arm64/kvm/arm.c | 37
-rw-r--r--  arch/arm64/kvm/vmid.c | 11
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 61
-rw-r--r--  arch/arm64/mm/init.c | 7
-rw-r--r--  arch/loongarch/include/asm/hugetlb.h | 6
-rw-r--r--  arch/mips/include/asm/hugetlb.h | 6
-rw-r--r--  arch/parisc/include/asm/hugetlb.h | 2
-rw-r--r--  arch/parisc/mm/hugetlbpage.c | 2
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h | 6
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h | 2
-rw-r--r--  arch/riscv/include/asm/futex.h | 2
-rw-r--r--  arch/riscv/include/asm/hugetlb.h | 3
-rw-r--r--  arch/riscv/kernel/cacheinfo.c | 12
-rw-r--r--  arch/riscv/kernel/cpufeature.c | 2
-rw-r--r--  arch/riscv/kernel/setup.c | 2
-rw-r--r--  arch/riscv/kernel/signal.c | 6
-rw-r--r--  arch/riscv/kvm/aia_imsic.c | 1
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_hsm.c | 11
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_replace.c | 15
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_system.c | 3
-rw-r--r--  arch/riscv/mm/hugetlbpage.c | 2
-rw-r--r--  arch/s390/include/asm/hugetlb.h | 16
-rw-r--r--  arch/s390/mm/hugetlbpage.c | 4
-rw-r--r--  arch/sparc/include/asm/hugetlb.h | 2
-rw-r--r--  arch/sparc/mm/hugetlbpage.c | 2
-rw-r--r--  arch/x86/entry/common.c | 1
-rw-r--r--  arch/x86/events/core.c | 2
-rw-r--r--  arch/x86/events/intel/core.c | 85
-rw-r--r--  arch/x86/events/rapl.c | 1
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 4
-rw-r--r--  arch/x86/kernel/devicetree.c | 3
-rw-r--r--  arch/x86/kernel/irq.c | 2
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 12
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 11
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  block/bio.c | 2
-rw-r--r--  block/blk-merge.c | 2
-rw-r--r--  block/blk-settings.c | 14
-rw-r--r--  block/blk-zoned.c | 76
-rw-r--r--  block/blk.h | 9
-rw-r--r--  drivers/ata/ahci.h | 8
-rw-r--r--  drivers/ata/libahci.c | 1
-rw-r--r--  drivers/ata/libata-core.c | 4
-rw-r--r--  drivers/bluetooth/btusb.c | 6
-rw-r--r--  drivers/firmware/cirrus/cs_dsp.c | 24
-rw-r--r--  drivers/firmware/efi/cper-arm.c | 2
-rw-r--r--  drivers/firmware/efi/cper-x86.c | 2
-rw-r--r--  drivers/firmware/efi/mokvar-table.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 86
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 25
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 26
-rw-r--r--  drivers/gpu/drm/drm_fbdev_dma.c | 217
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 9
-rw-r--r--  drivers/gpu/drm/imagination/Makefile | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 1
-rw-r--r--  drivers/gpu/drm/vkms/vkms_composer.c | 2
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_engine_regs.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_oa.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 40
-rw-r--r--  drivers/i2c/busses/i2c-amd-asf-plat.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ls2x.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-npcm7xx.c | 7
-rw-r--r--  drivers/idle/intel_idle.c | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.c | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 22
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.h | 8
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 64
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2
-rw-r--r--  drivers/infiniband/hw/mana/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/ah.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/counters.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 16
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.h | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/umr.c | 83
-rw-r--r--  drivers/iommu/amd/iommu.c | 4
-rw-r--r--  drivers/iommu/intel/dmar.c | 1
-rw-r--r--  drivers/iommu/intel/iommu.c | 10
-rw-r--r--  drivers/md/dm-integrity.c | 25
-rw-r--r--  drivers/md/dm-vdo/dedupe.c | 1
-rw-r--r--  drivers/net/dsa/realtek/Kconfig | 6
-rw-r--r--  drivers/net/dsa/realtek/Makefile | 3
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb-leds.c | 177
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.c | 258
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.h | 107
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 103
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_pf.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib_private.h | 1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 14
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/ti/icssg/icss_iep.c | 21
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 21
-rw-r--r--  drivers/net/loopback.c | 14
-rw-r--r--  drivers/net/netdevsim/ethtool.c | 2
-rw-r--r--  drivers/net/phy/qcom/qca807x.c | 2
-rw-r--r--  drivers/net/usb/gl620a.c | 4
-rw-r--r--  drivers/scsi/scsi_lib.c | 14
-rw-r--r--  drivers/thermal/gov_power_allocator.c | 32
-rw-r--r--  drivers/thermal/thermal_of.c | 50
-rw-r--r--  drivers/ufs/core/ufs_bsg.c | 6
-rw-r--r--  drivers/ufs/core/ufshcd.c | 38
-rw-r--r--  drivers/vhost/vhost.c | 2
-rw-r--r--  fs/afs/server.c | 3
-rw-r--r--  fs/afs/server_list.c | 4
-rw-r--r--  fs/bcachefs/btree_cache.c | 9
-rw-r--r--  fs/bcachefs/btree_io.c | 2
-rw-r--r--  fs/bcachefs/btree_key_cache.c | 2
-rw-r--r--  fs/bcachefs/btree_locking.c | 5
-rw-r--r--  fs/bcachefs/btree_locking.h | 2
-rw-r--r--  fs/bcachefs/data_update.c | 1
-rw-r--r--  fs/bcachefs/dirent.h | 5
-rw-r--r--  fs/bcachefs/extents.h | 2
-rw-r--r--  fs/bcachefs/fs-common.c | 11
-rw-r--r--  fs/bcachefs/fs-io.c | 1
-rw-r--r--  fs/bcachefs/fsck.c | 21
-rw-r--r--  fs/bcachefs/journal.c | 4
-rw-r--r--  fs/bcachefs/sb-downgrade.c | 5
-rw-r--r--  fs/bcachefs/six.c | 5
-rw-r--r--  fs/bcachefs/six.h | 7
-rw-r--r--  fs/btrfs/extent_map.c | 83
-rw-r--r--  fs/btrfs/file.c | 9
-rw-r--r--  fs/btrfs/tests/delayed-refs-tests.c | 1
-rw-r--r--  fs/btrfs/volumes.c | 6
-rw-r--r--  fs/efivarfs/file.c | 10
-rw-r--r--  fs/efivarfs/super.c | 3
-rw-r--r--  fs/fuse/dev.c | 6
-rw-r--r--  fs/fuse/dir.c | 2
-rw-r--r--  fs/fuse/file.c | 13
-rw-r--r--  fs/iomap/direct-io.c | 8
-rw-r--r--  fs/namei.c | 24
-rw-r--r--  fs/nfs/delegation.c | 37
-rw-r--r--  fs/nfs/delegation.h | 1
-rw-r--r--  fs/nfs/direct.c | 23
-rw-r--r--  fs/nfs/nfs4proc.c | 10
-rw-r--r--  fs/nsfs.c | 1
-rw-r--r--  fs/overlayfs/copy_up.c | 2
-rw-r--r--  fs/pidfs.c | 1
-rw-r--r--  include/asm-generic/hugetlb.h | 2
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 2
-rw-r--r--  include/linux/blkdev.h | 8
-rw-r--r--  include/linux/call_once.h | 47
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/fs.h | 6
-rw-r--r--  include/linux/hugetlb.h | 4
-rw-r--r--  include/linux/nfs4.h | 1
-rw-r--r--  include/linux/rcuref.h | 9
-rw-r--r--  include/linux/socket.h | 2
-rw-r--r--  include/linux/sunrpc/sched.h | 3
-rw-r--r--  include/net/sock.h | 1
-rw-r--r--  include/sound/cs35l56.h | 31
-rw-r--r--  include/trace/events/afs.h | 2
-rw-r--r--  include/trace/events/sunrpc.h | 3
-rw-r--r--  include/uapi/linux/landlock.h | 8
-rw-r--r--  io_uring/net.c | 4
-rw-r--r--  kernel/events/core.c | 31
-rw-r--r--  kernel/events/uprobes.c | 15
-rw-r--r--  kernel/sched/core.c | 2
-rw-r--r--  kernel/sched/ext.c | 11
-rw-r--r--  kernel/trace/ftrace.c | 27
-rw-r--r--  kernel/trace/trace_events_hist.c | 30
-rw-r--r--  kernel/vhost_task.c | 4
-rw-r--r--  kernel/workqueue.c | 4
-rw-r--r--  lib/rcuref.c | 5
-rw-r--r--  mm/filemap.c | 2
-rw-r--r--  mm/hugetlb.c | 4
-rw-r--r--  mm/truncate.c | 2
-rw-r--r--  net/bluetooth/l2cap_core.c | 9
-rw-r--r--  net/core/dev.c | 14
-rw-r--r--  net/core/gro.c | 1
-rw-r--r--  net/core/scm.c | 10
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/core/sock.c | 27
-rw-r--r--  net/core/sysctl_net_core.c | 3
-rw-r--r--  net/ethtool/common.c | 16
-rw-r--r--  net/ethtool/common.h | 6
-rw-r--r--  net/ethtool/ioctl.c | 4
-rw-r--r--  net/ethtool/rings.c | 9
-rw-r--r--  net/ipv4/tcp.c | 26
-rw-r--r--  net/ipv4/tcp_minisocks.c | 10
-rw-r--r--  net/ipv6/rpl_iptunnel.c | 14
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 14
-rw-r--r--  net/mptcp/pm_netlink.c | 5
-rw-r--r--  net/mptcp/protocol.h | 2
-rw-r--r--  net/mptcp/subflow.c | 20
-rw-r--r--  net/netlink/af_netlink.c | 10
-rw-r--r--  net/rds/tcp.c | 8
-rw-r--r--  net/rxrpc/ar-internal.h | 1
-rw-r--r--  net/rxrpc/input.c | 2
-rw-r--r--  net/rxrpc/peer_event.c | 9
-rw-r--r--  net/rxrpc/peer_object.c | 5
-rw-r--r--  net/rxrpc/rxperf.c | 12
-rw-r--r--  net/smc/af_smc.c | 5
-rw-r--r--  net/sunrpc/cache.c | 10
-rw-r--r--  net/sunrpc/sched.c | 2
-rw-r--r--  net/sunrpc/svcsock.c | 5
-rw-r--r--  net/sunrpc/xprtsock.c | 18
-rw-r--r--  net/unix/af_unix.c | 1
-rw-r--r--  security/integrity/evm/evm_crypto.c | 2
-rw-r--r--  security/integrity/evm/evm_main.c | 2
-rw-r--r--  security/integrity/ima/ima.h | 3
-rw-r--r--  security/integrity/ima/ima_main.c | 13
-rw-r--r--  security/landlock/net.c | 3
-rw-r--r--  security/landlock/ruleset.c | 2
-rw-r--r--  sound/pci/hda/cs35l56_hda_spi.c | 3
-rw-r--r--  sound/pci/hda/patch_realtek.c | 2
-rw-r--r--  sound/soc/codecs/cs35l56-shared.c | 80
-rw-r--r--  sound/soc/codecs/cs35l56-spi.c | 3
-rw-r--r--  sound/soc/codecs/es8328.c | 15
-rw-r--r--  sound/soc/codecs/tas2764.c | 10
-rw-r--r--  sound/soc/codecs/tas2764.h | 8
-rw-r--r--  sound/soc/codecs/tas2770.c | 2
-rw-r--r--  sound/soc/fsl/fsl_sai.c | 6
-rw-r--r--  sound/soc/fsl/imx-audmix.c | 4
-rw-r--r--  sound/soc/intel/boards/sof_sdw.c | 7
-rw-r--r--  sound/soc/sof/intel/hda.c | 18
-rw-r--r--  sound/usb/midi.c | 2
-rw-r--r--  sound/usb/quirks.c | 1
-rw-r--r--  tools/arch/arm64/tools/Makefile | 6
-rw-r--r--  tools/bpf/Makefile | 6
-rw-r--r--  tools/bpf/bpftool/Documentation/Makefile | 6
-rw-r--r--  tools/bpf/bpftool/Makefile | 6
-rw-r--r--  tools/bpf/resolve_btfids/Makefile | 2
-rw-r--r--  tools/bpf/runqslower/Makefile | 5
-rw-r--r--  tools/build/Makefile | 8
-rw-r--r--  tools/lib/bpf/Makefile | 13
-rw-r--r--  tools/lib/perf/Makefile | 13
-rw-r--r--  tools/lib/thermal/Makefile | 13
-rw-r--r--  tools/objtool/Makefile | 6
-rw-r--r--  tools/objtool/check.c | 7
-rw-r--r--  tools/objtool/include/objtool/special.h | 2
-rw-r--r--  tools/objtool/noreturns.h | 2
-rw-r--r--  tools/perf/Makefile.perf | 41
-rw-r--r--  tools/scripts/Makefile.include | 30
-rwxr-xr-x  tools/sound/dapm-graph | 2
-rw-r--r--  tools/testing/selftests/bpf/Makefile.docs | 6
-rwxr-xr-x  tools/testing/selftests/drivers/net/hds.py | 145
-rwxr-xr-x  tools/testing/selftests/drivers/net/queues.py | 7
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc | 18
-rw-r--r--  tools/testing/selftests/hid/Makefile | 2
-rw-r--r--  tools/testing/selftests/landlock/.gitignore | 2
-rw-r--r--  tools/testing/selftests/landlock/common.h | 1
-rw-r--r--  tools/testing/selftests/landlock/config | 3
-rw-r--r--  tools/testing/selftests/landlock/net_test.c | 124
-rw-r--r--  tools/testing/selftests/net/lib/Makefile | 3
-rw-r--r--  tools/testing/selftests/net/lib/xdp_dummy.bpf.c | 13
-rw-r--r--  tools/testing/selftests/rseq/rseq-riscv-bits.h | 6
-rw-r--r--  tools/testing/selftests/rseq/rseq-riscv.h | 2
-rw-r--r--  tools/thermal/lib/Makefile | 13
-rw-r--r--  tools/tracing/latency/Makefile | 6
-rw-r--r--  tools/tracing/rtla/Makefile | 6
-rw-r--r--  tools/verification/rv/Makefile | 6
289 files changed, 2580 insertions(+), 1393 deletions(-)
diff --git a/.mailmap b/.mailmap
index a897c16d3bae..77bcfcfd471c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -522,6 +522,7 @@ Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
+Natalie Vock <natalie.vock@gmx.de> <friedrich.vock@gmx.de>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.ibm.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.vnet.ibm.com>
@@ -613,6 +614,8 @@ Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
+Rodrigo Siqueira <siqueira@igalia.com> <rodrigosiqueiramelo@gmail.com>
+Rodrigo Siqueira <siqueira@igalia.com> <Rodrigo.Siqueira@amd.com>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
diff --git a/Documentation/arch/powerpc/cxl.rst b/Documentation/arch/powerpc/cxl.rst
index d2d77057610e..778adda740d2 100644
--- a/Documentation/arch/powerpc/cxl.rst
+++ b/Documentation/arch/powerpc/cxl.rst
@@ -18,6 +18,7 @@ Introduction
both access system memory directly and with the same effective
addresses.
+ **This driver is deprecated and will be removed in a future release.**
Hardware overview
=================
@@ -453,7 +454,7 @@ Sysfs Class
A cxl sysfs class is added under /sys/class/cxl to facilitate
enumeration and tuning of the accelerators. Its layout is
- described in Documentation/ABI/testing/sysfs-class-cxl
+ described in Documentation/ABI/obsolete/sysfs-class-cxl
Udev rules
diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst
index d639c61cb472..ad587f53fe41 100644
--- a/Documentation/userspace-api/landlock.rst
+++ b/Documentation/userspace-api/landlock.rst
@@ -8,7 +8,7 @@ Landlock: unprivileged access control
=====================================
:Author: Mickaël Salaün
-:Date: October 2024
+:Date: January 2025
The goal of Landlock is to enable restriction of ambient rights (e.g. global
filesystem or network access) for a set of processes. Because Landlock
@@ -329,11 +329,11 @@ non-sandboxed process, we can specify this restriction with
A sandboxed process can connect to a non-sandboxed process when its domain is
not scoped. If a process's domain is scoped, it can only connect to sockets
created by processes in the same scope.
-Moreover, If a process is scoped to send signal to a non-scoped process, it can
+Moreover, if a process is scoped to send signal to a non-scoped process, it can
only send signals to processes in the same scope.
A connected datagram socket behaves like a stream socket when its domain is
-scoped, meaning if the domain is scoped after the socket is connected , it can
+scoped, meaning if the domain is scoped after the socket is connected, it can
still :manpage:`send(2)` data just like a stream socket. However, in the same
scenario, a non-connected datagram socket cannot send data (with
:manpage:`sendto(2)`) outside its scope.
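As a concrete illustration of the scoping described above, a minimal userspace sketch (assuming a kernel with Landlock ABI >= 6, where the scoped attribute field and LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET exist in include/uapi/linux/landlock.h) that places the calling thread in a scoped domain; after this returns, an already-connect()ed datagram socket keeps send()ing like a stream socket, while sendto() outside the scope is denied:
#define _GNU_SOURCE
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
/* Scope this thread: abstract UNIX socket connections are confined to
 * processes running in the same Landlock domain. */
static int scope_self(void)
{
	const struct landlock_ruleset_attr attr = {
		.scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET,
	};
	const int ruleset_fd = syscall(__NR_landlock_create_ruleset,
				       &attr, sizeof(attr), 0);
	int ret;

	if (ruleset_fd < 0)
		return -1;
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	ret = syscall(__NR_landlock_restrict_self, ruleset_fd, 0);
	close(ruleset_fd);
	return ret;
}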
diff --git a/MAINTAINERS b/MAINTAINERS
index 4ff26fa94895..8e0736dc2ee0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1046,14 +1046,14 @@ F: drivers/crypto/ccp/hsti.*
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>
-M: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+R: Rodrigo Siqueira <siqueira@igalia.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/
AMD DISPLAY CORE - DML
-M: Chaitanya Dhere <chaitanya.dhere@amd.com>
+M: Austin Zheng <austin.zheng@amd.com>
M: Jun Lei <jun.lei@amd.com>
S: Supported
F: drivers/gpu/drm/amd/display/dc/dml/
@@ -2878,7 +2878,7 @@ F: drivers/pinctrl/nxp/
ARM/NXP S32G/S32R DWMAC ETHERNET DRIVER
M: Jan Petrous <jan.petrous@oss.nxp.com>
-L: NXP S32 Linux Team <s32@nxp.com>
+R: s32@nxp.com
S: Maintained
F: Documentation/devicetree/bindings/net/nxp,s32-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -5856,7 +5856,6 @@ F: Documentation/security/snp-tdx-threat-model.rst
CONFIGFS
M: Joel Becker <jlbec@evilplan.org>
-M: Christoph Hellwig <hch@lst.de>
S: Supported
T: git git://git.infradead.org/users/hch/configfs.git
F: fs/configfs/
@@ -5927,6 +5926,17 @@ F: tools/testing/selftests/cgroup/test_cpuset.c
F: tools/testing/selftests/cgroup/test_cpuset_prs.sh
F: tools/testing/selftests/cgroup/test_cpuset_v1_base.sh
+CONTROL GROUP - DEVICE MEMORY CONTROLLER (DMEM)
+M: Maarten Lankhorst <dev@lankhorst.se>
+M: Maxime Ripard <mripard@kernel.org>
+M: Natalie Vock <natalie.vock@gmx.de>
+L: cgroups@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: include/linux/cgroup_dmem.h
+F: kernel/cgroup/dmem.c
+
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
@@ -6879,7 +6889,6 @@ F: kernel/dma/map_benchmark.c
F: tools/testing/selftests/dma/
DMA MAPPING HELPERS
-M: Christoph Hellwig <hch@lst.de>
M: Marek Szyprowski <m.szyprowski@samsung.com>
R: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
@@ -15682,7 +15691,7 @@ F: include/uapi/linux/cciss*.h
MICROSOFT MANA RDMA DRIVER
M: Long Li <longli@microsoft.com>
-M: Ajay Sharma <sharmaajay@microsoft.com>
+M: Konstantin Taranov <kotaranov@microsoft.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/mana/
@@ -19657,7 +19666,6 @@ F: drivers/net/wireless/quantenna
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
-M: Xinhui Pan <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
B: https://gitlab.freedesktop.org/drm/amd/-/issues
@@ -20330,6 +20338,7 @@ RISC-V ARCHITECTURE
M: Paul Walmsley <paul.walmsley@sifive.com>
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Albert Ou <aou@eecs.berkeley.edu>
+R: Alexandre Ghiti <alex@ghiti.fr>
L: linux-riscv@lists.infradead.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-riscv/list/
@@ -21923,10 +21932,13 @@ F: sound/soc/uniphier/
SOCKET TIMESTAMPING
M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+R: Jason Xing <kernelxing@tencent.com>
S: Maintained
F: Documentation/networking/timestamping.rst
F: include/linux/net_tstamp.h
F: include/uapi/linux/net_tstamp.h
+F: tools/testing/selftests/bpf/*/net_timestamping*
+F: tools/testing/selftests/net/*timestamp*
F: tools/testing/selftests/net/so_txtime.c
SOEKRIS NET48XX LED SUPPORT
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index c6dff3e69539..07fbf5bf85a7 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
-extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
+extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
@@ -76,12 +76,22 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
{
unsigned long stride = huge_page_size(hstate_vma(vma));
- if (stride == PMD_SIZE)
- __flush_tlb_range(vma, start, end, stride, false, 2);
- else if (stride == PUD_SIZE)
- __flush_tlb_range(vma, start, end, stride, false, 1);
- else
- __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
+ switch (stride) {
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ __flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
+ break;
+#endif
+ case CONT_PMD_SIZE:
+ case PMD_SIZE:
+ __flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
+ break;
+ case CONT_PTE_SIZE:
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
+ break;
+ default:
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
+ }
}
#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 8d94a6c0ed5c..c2417a424b98 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -119,7 +119,7 @@
#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
#define TCR_EL2_T0SZ_MASK 0x3f
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
- TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
+ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_DS TCR_EL2_DS
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3a7ec98ef123..d919557af5e5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1259,7 +1259,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index b8e55a441282..0160b4924351 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -560,6 +560,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
/*
+ * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
+ * which happens eagerly in VHE.
+ *
+ * Also, the VMID allocator only preserves VMIDs that are active at the
+ * time of rollover, so KVM might need to grab a new VMID for the MMU if
+ * this is called from kvm_sched_in().
+ */
+ kvm_arm_vmid_update(&mmu->vmid);
+
+ /*
* We guarantee that both TLBs and I-cache are private to each
* vcpu. If detecting that a vcpu from the same VM has
* previously run on the same physical CPU, call into the
@@ -1138,18 +1148,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
preempt_disable();
- /*
- * The VMID allocator only tracks active VMIDs per
- * physical CPU, and therefore the VMID allocated may not be
- * preserved on VMID roll-over if the task was preempted,
- * making a thread's VMID inactive. So we need to call
- * kvm_arm_vmid_update() in non-premptible context.
- */
- if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
- has_vhe())
- __load_stage2(vcpu->arch.hw_mmu,
- vcpu->arch.hw_mmu->arch);
-
kvm_pmu_flush_hwstate(vcpu);
local_irq_disable();
@@ -1980,7 +1978,7 @@ static int kvm_init_vector_slots(void)
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
{
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
- unsigned long tcr, ips;
+ unsigned long tcr;
/*
* Calculate the raw per-cpu offset without a translation from the
@@ -1994,19 +1992,18 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
params->mair_el2 = read_sysreg(mair_el1);
tcr = read_sysreg(tcr_el1);
- ips = FIELD_GET(TCR_IPS_MASK, tcr);
if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
+ tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
tcr |= TCR_EPD1_MASK;
} else {
+ unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
+
tcr &= TCR_EL2_MASK;
- tcr |= TCR_EL2_RES1;
+ tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
+ if (lpa2_is_enabled())
+ tcr |= TCR_EL2_DS;
}
- tcr &= ~TCR_T0SZ_MASK;
tcr |= TCR_T0SZ(hyp_va_bits);
- tcr &= ~TCR_EL2_PS_MASK;
- tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
- if (lpa2_is_enabled())
- tcr |= TCR_EL2_DS;
params->tcr_el2 = tcr;
params->pgd_pa = kvm_mmu_get_httbr();
diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c
index 806223b7022a..7fe8ba1a2851 100644
--- a/arch/arm64/kvm/vmid.c
+++ b/arch/arm64/kvm/vmid.c
@@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
unsigned long flags;
u64 vmid, old_active_vmid;
- bool updated = false;
vmid = atomic64_read(&kvm_vmid->id);
@@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
old_active_vmid, vmid))
- return false;
+ return;
raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
/* Check that our VMID belongs to the current generation. */
vmid = atomic64_read(&kvm_vmid->id);
- if (!vmid_gen_match(vmid)) {
+ if (!vmid_gen_match(vmid))
vmid = new_vmid(kvm_vmid);
- updated = true;
- }
atomic64_set(this_cpu_ptr(&active_vmids), vmid);
raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
-
- return updated;
}
/*
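Read together with the arm.c hunks above, the allocator no longer needs to report whether it switched VMIDs:
/*
 * Net effect of the two hunks above (illustrative sketch, not code
 * from the patch):
 *
 *   kvm_arch_vcpu_load(vcpu, cpu)
 *       kvm_arm_vmid_update(&mmu->vmid);   // may grab a fresh VMID,
 *                                          // e.g. after a rollover
 *                                          // while scheduled out
 *       ...                                // VHE programs VTTBR_EL2
 *                                          // eagerly after this point
 *
 *   kvm_arch_vcpu_ioctl_run(vcpu)
 *       // no VMID work left; the "reload stage-2 if updated" special
 *       // case and the bool return value are gone
 */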
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 98a2a0e64e25..b3a7fafe8892 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
- int contig_ptes = 0;
+ int contig_ptes = 1;
*pgsize = size;
switch (size) {
-#ifndef __PAGETABLE_PMD_FOLDED
- case PUD_SIZE:
- if (pud_sect_supported())
- contig_ptes = 1;
- break;
-#endif
- case PMD_SIZE:
- contig_ptes = 1;
- break;
case CONT_PMD_SIZE:
*pgsize = PMD_SIZE;
contig_ptes = CONT_PMDS;
@@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*pgsize = PAGE_SIZE;
contig_ptes = CONT_PTES;
break;
+ default:
+ WARN_ON(!__hugetlb_valid_size(size));
}
return contig_ptes;
@@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
- pte_t orig_pte = __ptep_get(ptep);
- unsigned long i;
-
- for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
- pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
-
- /*
- * If HW_AFDBM is enabled, then the HW could turn on
- * the dirty or accessed bit for any page in the set,
- * so check them all.
- */
- if (pte_dirty(pte))
- orig_pte = pte_mkdirty(orig_pte);
-
- if (pte_young(pte))
- orig_pte = pte_mkyoung(orig_pte);
+ pte_t pte, tmp_pte;
+ bool present;
+
+ pte = __ptep_get_and_clear(mm, addr, ptep);
+ present = pte_present(pte);
+ while (--ncontig) {
+ ptep++;
+ addr += pgsize;
+ tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
+ if (present) {
+ if (pte_dirty(tmp_pte))
+ pte = pte_mkdirty(pte);
+ if (pte_young(tmp_pte))
+ pte = pte_mkyoung(pte);
+ }
}
- return orig_pte;
+ return pte;
}
static pte_t get_clear_contig_flush(struct mm_struct *mm,
@@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
__pte_clear(mm, addr, ptep);
}
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
{
int ncontig;
size_t pgsize;
- pte_t orig_pte = __ptep_get(ptep);
-
- if (!pte_cont(orig_pte))
- return __ptep_get_and_clear(mm, addr, ptep);
-
- ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+ ncontig = num_contig_ptes(sz, &pgsize);
return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
@@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
@@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
if (pte_user_exec(__ptep_get(ptep)))
return huge_ptep_clear_flush(vma, addr, ptep);
}
- return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
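Two things happen in this file: get_clear_contig() no longer reads the first PTE up front (it may be a non-present entry whose dirty/young bits are meaningless), and huge_ptep_get_and_clear() now derives the mapping geometry from the new sz argument instead of from pte_cont() on the stored PTE. For reference, a sketch of what num_contig_ptes() returns after this change, assuming a 4K translation granule where CONT_PTES == CONT_PMDS == 16 (other granules differ):
/*
 *   size                     *pgsize     contig_ptes
 *   PUD_SIZE      (1G)       1G          1
 *   CONT_PMD_SIZE (32M)      PMD_SIZE    16
 *   PMD_SIZE      (2M)       2M          1
 *   CONT_PTE_SIZE (64K)      PAGE_SIZE   16
 *   anything else            size        1   (plus a WARN_ON for
 *                                             invalid hugetlb sizes)
 *
 * Deriving the count from sz also works when the PTE is a non-present
 * (e.g. migration) entry, where the contiguous bit cannot be trusted.
 */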
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9c0b8d9558fc..ccdef53872a0 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern u16 memstart_offset_seed;
-
- /*
- * Use the sanitised version of id_aa64mmfr0_el1 so that linear
- * map randomization can be enabled by shrinking the IPA space.
- */
- u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
int parange = cpuid_feature_extract_unsigned_field(
mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
s64 range = linear_region_size -
diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
index c8e4057734d0..4dc4b3e04225 100644
--- a/arch/loongarch/include/asm/hugetlb.h
+++ b/arch/loongarch/include/asm/hugetlb.h
@@ -36,7 +36,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
{
pte_t clear;
pte_t pte = ptep_get(ptep);
@@ -51,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
+ unsigned long sz = huge_page_size(hstate_vma(vma));
- pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index d0a86ce83de9..fbc71ddcf0f6 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -27,7 +27,8 @@ static inline int prepare_hugepage_range(struct file *file,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
{
pte_t clear;
pte_t pte = *ptep;
@@ -42,13 +43,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
+ unsigned long sz = huge_page_size(hstate_vma(vma));
/*
* clear the huge pte entry firstly, so that the other smp threads will
* not get old pte entry after finishing flush_tlb_page and before
* setting new huge pte entry
*/
- pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
index 5b3a5429f71b..21e9ace17739 100644
--- a/arch/parisc/include/asm/hugetlb.h
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
+ pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index e9d18cf25b79..a94fe546d434 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -126,7 +126,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
pte_t entry;
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index dad2e7980f24..86326587e58d 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
{
return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
}
@@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
+ unsigned long sz = huge_page_size(hstate_vma(vma));
- pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_hugetlb_page(vma, addr);
return pte;
}
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 4cadc56220fe..427c41dde643 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -231,7 +231,7 @@
__arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append, \
- __ret, __ptr, (long), __old, __new); \
+ __ret, __ptr, (long)(int)(long), __old, __new); \
break; \
case 8: \
__arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index 72be100afa23..90c86b115e00 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \
: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
- : [ov] "Jr" (oldval), [nv] "Jr" (newval)
+ : [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
: "memory");
__disable_user_access();
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index faf3624d8057..446126497768 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 2d40736fc37c..26b085dbdd07 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu)
if (!np)
return -ENOENT;
- if (of_property_read_bool(np, "cache-size"))
+ if (of_property_present(np, "cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
- if (of_property_read_bool(np, "i-cache-size"))
+ if (of_property_present(np, "i-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
- if (of_property_read_bool(np, "d-cache-size"))
+ if (of_property_present(np, "d-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
prev = np;
@@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu)
break;
if (level <= levels)
break;
- if (of_property_read_bool(np, "cache-size"))
+ if (of_property_present(np, "cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
- if (of_property_read_bool(np, "i-cache-size"))
+ if (of_property_present(np, "i-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
- if (of_property_read_bool(np, "d-cache-size"))
+ if (of_property_present(np, "d-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
levels = level;
}
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index c6ba750536c3..40ac72e407b6 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -479,7 +479,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
if (bit < RISCV_ISA_EXT_BASE)
*this_hwcap |= isa2hwcap[bit];
}
- } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
+ } while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX));
}
static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap)
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index f1793630fc51..4fe45daa6281 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -322,8 +322,8 @@ void __init setup_arch(char **cmdline_p)
riscv_init_cbo_blocksizes();
riscv_fill_hwcap();
- init_rt_signal_env();
apply_boot_alternatives();
+ init_rt_signal_env();
if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
riscv_isa_extension_available(NULL, ZICBOM))
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 94e905eea1de..08378fea3a11 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all)
if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
total_context_size += riscv_v_sc_size;
}
- /*
- * Preserved a __riscv_ctx_hdr for END signal context header if an
- * extension uses __riscv_extra_ext_header
- */
- if (total_context_size)
- total_context_size += sizeof(struct __riscv_ctx_hdr);
frame_size += total_context_size;
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index a8085cd8215e..29ef9c2133a9 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -974,7 +974,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
if (imsic->vsfile_cpu >= 0) {
writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
- kvm_vcpu_kick(vcpu);
} else {
eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
index dce667f4b6ab..3070bb31745d 100644
--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
+++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
+#include <linux/wordpart.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
@@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
if (!target_vcpu)
return SBI_ERR_INVALID_PARAM;
- if (!kvm_riscv_vcpu_stopped(target_vcpu))
- return SBI_HSM_STATE_STARTED;
- else if (vcpu->stat.generic.blocking)
+ if (kvm_riscv_vcpu_stopped(target_vcpu))
+ return SBI_HSM_STATE_STOPPED;
+ else if (target_vcpu->stat.generic.blocking)
return SBI_HSM_STATE_SUSPENDED;
else
- return SBI_HSM_STATE_STOPPED;
+ return SBI_HSM_STATE_STARTED;
}
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
return 0;
case SBI_EXT_HSM_HART_SUSPEND:
- switch (cp->a0) {
+ switch (lower_32_bits(cp->a0)) {
case SBI_HSM_SUSPEND_RET_DEFAULT:
kvm_riscv_vcpu_wfi(vcpu);
break;
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 9c2ab3dfa93a..5fbf3f94f1e8 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
u64 next_cycle;
if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
- retdata->err_val = SBI_ERR_INVALID_PARAM;
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}
@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
+ unsigned long hart_bit = 0, sentmask = 0;
if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
- retdata->err_val = SBI_ERR_INVALID_PARAM;
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}
@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (hbase != -1UL) {
if (tmp->vcpu_id < hbase)
continue;
- if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+ hart_bit = tmp->vcpu_id - hbase;
+ if (hart_bit >= __riscv_xlen)
+ goto done;
+ if (!(hmask & (1UL << hart_bit)))
continue;
}
ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
if (ret < 0)
break;
+ sentmask |= 1UL << hart_bit;
kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
}
+done:
+ if (hbase != -1UL && (hmask ^ sentmask))
+ retdata->err_val = SBI_ERR_INVALID_PARAM;
+
return ret;
}
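A worked example of the new bookkeeping (illustrative values):
/*
 * A guest with vcpus {0, 1} calls SBI_EXT_IPI_SEND_IPI with
 * hbase = 0, hmask = 0b1011.
 *
 *   vcpu 0: hart_bit = 0, IPI sent, sentmask = 0b0001
 *   vcpu 1: hart_bit = 1, IPI sent, sentmask = 0b0011
 *   (no vcpu 3 exists, so bit 3 is never acknowledged)
 *
 * hmask ^ sentmask = 0b1000 != 0, so the handler now returns
 * SBI_ERR_INVALID_PARAM instead of silently ignoring the bogus hart;
 * a hart_bit >= __riscv_xlen likewise bails out via the new "done"
 * label rather than shifting 1UL out of range.
 */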
diff --git a/arch/riscv/kvm/vcpu_sbi_system.c b/arch/riscv/kvm/vcpu_sbi_system.c
index 5d55e08791fa..bc0ebba89003 100644
--- a/arch/riscv/kvm/vcpu_sbi_system.c
+++ b/arch/riscv/kvm/vcpu_sbi_system.c
@@ -4,6 +4,7 @@
*/
#include <linux/kvm_host.h>
+#include <linux/wordpart.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/sbi.h>
@@ -19,7 +20,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
switch (funcid) {
case SBI_EXT_SUSP_SYSTEM_SUSPEND:
- if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
+ if (lower_32_bits(cp->a0) != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
return 0;
}
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index 42314f093922..b4a78a4b35cf 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
pte_t orig_pte = ptep_get(ptep);
int pte_num;
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 7c52acaf9f82..663e87220e89 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -25,8 +25,16 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
+{
+ return __huge_ptep_get_and_clear(mm, addr, ptep);
+}
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
@@ -48,7 +56,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
- return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
+ return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
@@ -59,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
if (changed) {
- huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ __huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
return changed;
@@ -69,7 +77,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
+ pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep);
__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index d9ce199953de..2e568f175cd4 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -188,8 +188,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
return __rste_to_pte(pte_val(*ptep));
}
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get(mm, addr, ptep);
pmd_t *pmdp = (pmd_t *) ptep;
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index c714ca6a05aa..e7a9cdd498dc 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
+ pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index eee601a0d2cf..80504148d8a5 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -260,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
unsigned int i, nptes, orig_shift, shift;
unsigned long size;
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 94941c5a10ac..14db5b85114c 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -190,6 +190,7 @@ static __always_inline bool int80_is_external(void)
/**
* do_int80_emulation - 32-bit legacy syscall C entry from asm
+ * @regs: syscall arguments in struct pt_regs on the stack.
*
* This entry point can be used by 32-bit and 64-bit programs to perform
* 32-bit system calls. Instances of INT $0x80 can be found inline in
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 8f218ac0d445..2092d615333d 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == event->pmu->type)
event->hw.config |= x86_pmu_get_event_config(event);
- if (event->attr.sample_period && x86_pmu.limit_period) {
+ if (!event->attr.freq && x86_pmu.limit_period) {
s64 left = event->attr.sample_period;
x86_pmu.limit_period(event, &left);
if (left > event->attr.sample_period)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index cdcebf30468a..cdb19e3ba3aa 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3952,6 +3952,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
}
+static u64 intel_pmu_freq_start_period(struct perf_event *event)
+{
+ int type = event->attr.type;
+ u64 config, factor;
+ s64 start;
+
+ /*
+ * The 127 is the lowest possible recommended SAV (sample after value)
+ * for a 4000 freq (default freq), according to the event list JSON file.
+ * Also, assume the workload is idle 50% of the time.
+ */
+ factor = 64 * 4000;
+ if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
+ goto end;
+
+ /*
+ * The estimation of the start period in the freq mode is
+ * based on the below assumption.
+ *
+ * For a cycles or an instructions event, 1GHZ of the
+ * underlying platform, 1 IPC. The workload is idle 50% of the time.
+ * The start period = 1,000,000,000 * 1 / freq / 2.
+ * = 500,000,000 / freq
+ *
+ * Usually, the branch-related events occur less than the
+ * instructions event. According to the Intel event list JSON
+ * file, the SAV (sample after value) of a branch-related event
+ * is usually 1/4 of an instruction event.
+ * The start period of branch-related events = 125,000,000 / freq.
+ *
+ * The cache-related events occur even less often. The SAV is usually
+ * 1/20 of an instruction event.
+ * The start period of cache-related events = 25,000,000 / freq.
+ */
+ config = event->attr.config & PERF_HW_EVENT_MASK;
+ if (type == PERF_TYPE_HARDWARE) {
+ switch (config) {
+ case PERF_COUNT_HW_CPU_CYCLES:
+ case PERF_COUNT_HW_INSTRUCTIONS:
+ case PERF_COUNT_HW_BUS_CYCLES:
+ case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
+ case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
+ case PERF_COUNT_HW_REF_CPU_CYCLES:
+ factor = 500000000;
+ break;
+ case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
+ case PERF_COUNT_HW_BRANCH_MISSES:
+ factor = 125000000;
+ break;
+ case PERF_COUNT_HW_CACHE_REFERENCES:
+ case PERF_COUNT_HW_CACHE_MISSES:
+ factor = 25000000;
+ break;
+ default:
+ goto end;
+ }
+ }
+
+ if (type == PERF_TYPE_HW_CACHE)
+ factor = 25000000;
+end:
+ /*
+ * Usually, a prime or a number with few factors (close to a prime)
+ * is chosen as an SAV, which makes it less likely that the sampling
+ * period synchronizes with some periodic event in the workload.
+ * Subtract 1 so that, at least for the default freq, values near
+ * powers of two are avoided.
+ */
+ start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
+
+ if (start > x86_pmu.max_period)
+ start = x86_pmu.max_period;
+
+ if (x86_pmu.limit_period)
+ x86_pmu.limit_period(event, &start);
+
+ return start;
+}
+
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
@@ -3963,6 +4042,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (ret)
return ret;
+ if (event->attr.freq && event->attr.sample_freq) {
+ event->hw.sample_period = intel_pmu_freq_start_period(event);
+ event->hw.last_period = event->hw.sample_period;
+ local64_set(&event->hw.period_left, event->hw.sample_period);
+ }
+
if (event->attr.precise_ip) {
if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
return -EINVAL;
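To sanity-check the arithmetic in intel_pmu_freq_start_period() above, a standalone userspace sketch (DIV_ROUND_UP_ULL is redefined locally; the constants come straight from the comments in the hunk). With the default 4000 Hz freq mode this yields start periods of 124999 (cycles class), 31249 (branches), 6249 (cache) and 63 (everything else), rather than ramping up from a tiny initial period:
#include <stdio.h>

#define DIV_ROUND_UP_ULL(x, d) (((x) + (d) - 1) / (d))

int main(void)
{
	unsigned long long freq = 4000;	/* perf's default freq-mode rate */

	/* cycles/instructions-class events: 500,000,000 / freq - 1 */
	printf("cycles:   %llu\n", DIV_ROUND_UP_ULL(500000000ULL, freq) - 1);
	/* branch-related events: 125,000,000 / freq - 1 */
	printf("branches: %llu\n", DIV_ROUND_UP_ULL(125000000ULL, freq) - 1);
	/* cache-related events: 25,000,000 / freq - 1 */
	printf("cache:    %llu\n", DIV_ROUND_UP_ULL(25000000ULL, freq) - 1);
	/* everything else: 64 * 4000 / freq - 1 */
	printf("other:    %llu\n", DIV_ROUND_UP_ULL(64ULL * 4000, freq) - 1);
	return 0;
}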
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 4952faf03e82..6941f4811bec 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -879,6 +879,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl),
X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_U, &model_skl),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl),
{},
};
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 9651275aecd1..dfec2c61e354 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -153,8 +153,8 @@ static void geode_configure(void)
u8 ccr3;
local_irq_save(flags);
- /* Suspend on halt power saving and enable #SUSP pin */
- setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+ /* Suspend on halt power saving */
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 59d23cdf4ed0..dd8748c45529 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -2,6 +2,7 @@
/*
* Architecture specific OF callbacks.
*/
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/interrupt.h>
@@ -313,6 +314,6 @@ void __init x86_flattree_get_config(void)
if (initial_dtb)
early_memunmap(dt, map_len);
#endif
- if (of_have_populated_dt())
+ if (acpi_disabled && of_have_populated_dt())
x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config;
}
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 385e3a5fc304..feca4f20b06a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -25,8 +25,10 @@
#include <asm/posted_intr.h>
#include <asm/irq_remapping.h>
+#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR)
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
+#endif
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d4ac4a1f8b81..8160870398b9 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
return true;
}
-static void kvm_mmu_start_lpage_recovery(struct once *once)
+static int kvm_mmu_start_lpage_recovery(struct once *once)
{
struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
struct kvm *kvm = container_of(ka, struct kvm, arch);
@@ -7471,13 +7471,14 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
kvm_nx_huge_page_recovery_worker_kill,
kvm, "kvm-nx-lpage-recovery");
- if (!nx_thread)
- return;
+ if (IS_ERR(nx_thread))
+ return PTR_ERR(nx_thread);
vhost_task_start(nx_thread);
/* Make the task visible only once it is fully started. */
WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
+ return 0;
}
int kvm_mmu_post_init_vm(struct kvm *kvm)
@@ -7485,10 +7486,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
if (nx_hugepage_mitigation_hard_disabled)
return 0;
- call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
- if (!kvm->arch.nx_huge_page_recovery_thread)
- return -ENOMEM;
- return 0;
+ return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
}
void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
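The companion change to include/linux/call_once.h appears in the diffstat but not in this excerpt; from the usage here, the init callback now returns int and call_once() propagates a failure instead of requiring the caller to probe for side effects afterwards. A hedged sketch of that contract, with hypothetical names:
/*
 * Apparent new contract (my_init/obj are hypothetical; the real
 * header change is not shown in this excerpt):
 *
 *   static int my_init(struct once *once)
 *   {
 *           return do_setup();      // 0 on success, -errno on failure
 *   }
 *
 *   // Runs my_init() at most once; here a failed vhost_task_create()
 *   // surfaces as PTR_ERR(nx_thread) instead of the old -ENOMEM
 *   // guess based on whether the thread pointer was populated.
 *   err = call_once(&obj->once, my_init);
 */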
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8a7af02d466e..ed8a3cb53961 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5084,6 +5084,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
load_vmcs12_host_state(vcpu, vmcs12);
+ /*
+ * Process events if an injectable IRQ or NMI is pending, even
+ * if the event is blocked (RFLAGS.IF is cleared on VM-Exit).
+ * If an event became pending while L2 was active, KVM needs to
+ * either inject the event or request an IRQ/NMI window. SMIs
+ * don't need to be processed as SMM is mutually exclusive with
+ * non-root mode. INIT/SIPI don't need to be checked as INIT
+ * is blocked post-VMXON, and SIPIs are ignored.
+ */
+ if (kvm_cpu_has_injectable_intr(vcpu) || vcpu->arch.nmi_pending)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
return;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 02159c967d29..6fc4ddc606bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12877,11 +12877,11 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
mutex_unlock(&kvm->slots_lock);
}
kvm_unload_vcpu_mmus(kvm);
+ kvm_destroy_vcpus(kvm);
kvm_x86_call(vm_destroy)(kvm);
kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
kvm_pic_destroy(kvm);
kvm_ioapic_destroy(kvm);
- kvm_destroy_vcpus(kvm);
kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
kvm_mmu_uninit_vm(kvm);
diff --git a/block/bio.c b/block/bio.c
index f0c416e5931d..6ac5983ba51e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -77,7 +77,7 @@ struct bio_slab {
struct kmem_cache *slab;
unsigned int slab_ref;
unsigned int slab_size;
- char name[8];
+ char name[12];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);
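The wider buffer presumably avoids snprintf() truncation once the number formatted into the "bio-<size>" slab name needs four or more digits; a standalone sketch of the failure mode with the old 8-byte buffer (the 1096 value is made up for illustration):
#include <stdio.h>

int main(void)
{
	char name[8];

	/* "bio-1096" needs 9 bytes including the NUL, so an 8-byte
	 * buffer silently stores the truncated "bio-109". */
	int n = snprintf(name, sizeof(name), "bio-%d", 1096);

	printf("needed %d bytes, stored \"%s\"\n", n + 1, name);
	return 0;
}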
diff --git a/block/blk-merge.c b/block/blk-merge.c
index c7c85e10cf9c..1d1589c35297 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -329,7 +329,7 @@ int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
if (nsegs < lim->max_segments &&
bytes + bv.bv_len <= max_bytes &&
- bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
+ bv.bv_offset + bv.bv_len <= lim->min_segment_size) {
nsegs++;
bytes += bv.bv_len;
} else {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c44dadc35e1e..b9c6f0ec1c49 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -246,6 +246,7 @@ int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
unsigned int logical_block_sectors;
+ unsigned long seg_size;
int err;
/*
@@ -303,7 +304,7 @@ int blk_validate_limits(struct queue_limits *lim)
max_hw_sectors = min_not_zero(lim->max_hw_sectors,
lim->max_dev_sectors);
if (lim->max_user_sectors) {
- if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
+ if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
@@ -341,7 +342,7 @@ int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->seg_boundary_mask)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
- if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
+ if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
return -EINVAL;
/*
@@ -362,10 +363,17 @@ int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->max_segment_size)
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
- if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
+ if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
return -EINVAL;
}
+ /* setup min segment size for building new segment in fast path */
+ if (lim->seg_boundary_mask > lim->max_segment_size - 1)
+ seg_size = lim->max_segment_size;
+ else
+ seg_size = lim->seg_boundary_mask + 1;
+ lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
+
/*
* We require drivers to at least do logical block aligned I/O, but
* historically could not check for that due to the separate calls
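Together with the bio_split_rw_at() hunk above, the fast path now compares against a precomputed per-queue min_segment_size instead of the PAGE_SIZE constant. A worked example of the clamp (illustrative limits):
/*
 * max_segment_size = 64K, seg_boundary_mask = 0xffff:
 *
 *   seg_boundary_mask (65535) > max_segment_size - 1 (65535)?  no
 *   => seg_size = seg_boundary_mask + 1 = 65536
 *   => min_segment_size = min(65536, PAGE_SIZE) = 4096 on a
 *      4K-page kernel
 *
 * So on 4K-page kernels the fast path behaves exactly as the old
 * "bv_offset + bv_len <= PAGE_SIZE" check did, while kernels with a
 * larger PAGE_SIZE (e.g. 64K) can now accept queues whose
 * max_segment_size is smaller than a page (down to
 * BLK_MIN_SEGMENT_SIZE) and get a correspondingly smaller fast-path
 * limit.
 */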
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 761ea662ddc3..0c77244a35c9 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -410,13 +410,14 @@ static bool disk_insert_zone_wplug(struct gendisk *disk,
}
}
hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
+ atomic_inc(&disk->nr_zone_wplugs);
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
return true;
}
-static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
- sector_t sector)
+static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk,
+ sector_t sector)
{
unsigned int zno = disk_zone_no(disk, sector);
unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
@@ -437,6 +438,15 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
return NULL;
}
+static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
+ sector_t sector)
+{
+ if (!atomic_read(&disk->nr_zone_wplugs))
+ return NULL;
+
+ return disk_get_hashed_zone_wplug(disk, sector);
+}
+
static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
{
struct blk_zone_wplug *zwplug =
@@ -503,6 +513,7 @@ static void disk_remove_zone_wplug(struct gendisk *disk,
zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
hlist_del_init_rcu(&zwplug->node);
+ atomic_dec(&disk->nr_zone_wplugs);
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
disk_put_zone_wplug(zwplug);
}
@@ -593,6 +604,11 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
{
struct bio *bio;
+ if (bio_list_empty(&zwplug->bio_list))
+ return;
+
+ pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n",
+ zwplug->disk->disk_name, zwplug->zone_no);
while ((bio = bio_list_pop(&zwplug->bio_list)))
blk_zone_wplug_bio_io_error(zwplug, bio);
}
@@ -1040,6 +1056,47 @@ plug:
return true;
}
+static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ struct blk_zone_wplug *zwplug;
+ unsigned long flags;
+
+ /*
+ * We have native support for zone append operations, so we are not
+ * going to handle @bio through plugging. However, we may already have a
+ * zone write plug for the target zone if that zone was previously
+ * partially written using regular writes. In such a case, we risk leaving
+ * the plug in the disk hash table if the zone is fully written using
+ * zone append operations. Avoid this by removing the zone write plug.
+ */
+ zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
+ if (likely(!zwplug))
+ return;
+
+ spin_lock_irqsave(&zwplug->lock, flags);
+
+ /*
+ * We are about to remove the zone write plug. But if the user
+ * (mistakenly) has issued regular writes together with native zone
+ * append, we must abort the writes as otherwise the plugged BIOs would
+ * not be executed by the plug BIO work as disk_get_zone_wplug() will
+ * return NULL after the plug is removed. Aborting the plugged write
+ * BIOs is consistent with the fact that these writes will most likely
+ * fail anyway as there are no ordering guarantees between zone append
+ * operations and regular write operations.
+ */
+ if (!bio_list_empty(&zwplug->bio_list)) {
+ pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n",
+ disk->disk_name, zwplug->zone_no);
+ disk_zone_wplug_abort(zwplug);
+ }
+ disk_remove_zone_wplug(disk, zwplug);
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+
+ disk_put_zone_wplug(zwplug);
+}
+
/**
* blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
* @bio: The BIO being submitted
@@ -1096,8 +1153,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
*/
switch (bio_op(bio)) {
case REQ_OP_ZONE_APPEND:
- if (!bdev_emulates_zone_append(bdev))
+ if (!bdev_emulates_zone_append(bdev)) {
+ blk_zone_wplug_handle_native_zone_append(bio);
return false;
+ }
fallthrough;
case REQ_OP_WRITE:
case REQ_OP_WRITE_ZEROES:
@@ -1284,6 +1343,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk,
{
unsigned int i;
+ atomic_set(&disk->nr_zone_wplugs, 0);
disk->zone_wplugs_hash_bits =
min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
@@ -1338,6 +1398,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
}
}
+ WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs));
kfree(disk->zone_wplugs_hash);
disk->zone_wplugs_hash = NULL;
disk->zone_wplugs_hash_bits = 0;
@@ -1550,11 +1611,12 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
}
/*
- * We need to track the write pointer of all zones that are not
- * empty nor full. So make sure we have a zone write plug for
- * such zone if the device has a zone write plug hash table.
+ * If the device needs zone append emulation, we need to track the
+ * write pointer of all zones that are not empty nor full. So make sure
+ * we have a zone write plug for such zone if the device has a zone
+ * write plug hash table.
*/
- if (!disk->zone_wplugs_hash)
+ if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
return 0;
disk_zone_wplug_sync_wp_offset(disk, zone);
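The new nr_zone_wplugs counter lets disk_get_zone_wplug() skip the RCU hash walk entirely while no zone is plugged, which is the common case for devices with native zone append. A minimal sketch of the pattern, with illustrative names only:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Illustrative only: an atomic element count in front of a hash table
	 * lets lookups bail out without touching the table when it is empty. */
	struct table {
		atomic_int nr_entries;
		/* ... hash buckets, walked under RCU in the real code ... */
	};

	static void *slow_lookup(struct table *t, unsigned long key)
	{
		(void)t;
		(void)key;		/* stand-in for the hash walk */
		return NULL;
	}

	static void *lookup(struct table *t, unsigned long key)
	{
		if (!atomic_load(&t->nr_entries))
			return NULL;	/* fast path: table known empty */
		return slow_lookup(t, key);
	}

	int main(void)
	{
		struct table t = { .nr_entries = 0 };

		printf("%p\n", lookup(&t, 42));
		return 0;
	}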
diff --git a/block/blk.h b/block/blk.h
index 90fa5f28ccab..9cf9a0099416 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -14,6 +14,7 @@
struct elevator_type;
#define BLK_DEV_MAX_SECTORS (LLONG_MAX >> 9)
+#define BLK_MIN_SEGMENT_SIZE 4096
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
@@ -358,8 +359,12 @@ struct bio *bio_split_zone_append(struct bio *bio,
static inline bool bio_may_need_split(struct bio *bio,
const struct queue_limits *lim)
{
- return lim->chunk_sectors || bio->bi_vcnt != 1 ||
- bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
+ if (lim->chunk_sectors)
+ return true;
+ if (bio->bi_vcnt != 1)
+ return true;
+ return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
+ lim->min_segment_size;
}
/**
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 8e895ae45c86..c842e2de6ef9 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -386,8 +386,12 @@ struct ahci_host_priv {
static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
unsigned int portid)
{
- return portid >= hpriv->nports ||
- !(hpriv->mask_port_map & (1 << portid));
+ if (portid >= hpriv->nports)
+ return true;
+ /* mask_port_map not set means that all ports are available */
+ if (!hpriv->mask_port_map)
+ return false;
+ return !(hpriv->mask_port_map & (1 << portid));
}
extern int ahci_ignore_sss;
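With the rewrite above, an unset mask_port_map no longer makes every port look masked out; the three cases can be checked in isolation (the values below are hypothetical):

	#include <stdbool.h>
	#include <stdio.h>

	/* Sketch of the fixed predicate: port out of range -> ignore;
	 * zero mask -> no masking, every port available. */
	static bool ignore_port(unsigned int nports, unsigned long mask,
				unsigned int portid)
	{
		if (portid >= nports)
			return true;
		if (!mask)
			return false;
		return !(mask & (1UL << portid));
	}

	int main(void)
	{
		printf("%d\n", ignore_port(4, 0x0, 2));  /* 0: no mask set */
		printf("%d\n", ignore_port(4, 0x5, 1));  /* 1: masked out */
		printf("%d\n", ignore_port(4, 0x5, 5));  /* 1: out of range */
		return 0;
	}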
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index fdfa7b266218..e7ace4b10f15 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -541,6 +541,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
hpriv->saved_port_map = port_map;
}
+ /* mask_port_map not set means that all ports are available */
if (hpriv->mask_port_map) {
dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
port_map,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 63ec2f218431..c085dd81ebe7 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,10 +4143,6 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
- { "Samsung SSD 870 QVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
- ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI |
- ATA_QUIRK_NOLPM },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 90966dfbd278..8149e53fd0a7 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2102,7 +2102,8 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -2576,7 +2577,8 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 5365e9a43000..42433c19eb30 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -1609,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
goto out_fw;
}
- ret = regmap_raw_write_async(regmap, reg, buf->buf,
- le32_to_cpu(region->len));
+ ret = regmap_raw_write(regmap, reg, buf->buf,
+ le32_to_cpu(region->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write %d bytes at %d in %s: %d\n",
@@ -1625,12 +1625,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
regions++;
}
- ret = regmap_async_complete(regmap);
- if (ret != 0) {
- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
- goto out_fw;
- }
-
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, regions, pos - firmware->size);
@@ -1638,7 +1632,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
cs_dsp_debugfs_save_wmfwname(dsp, file);
out_fw:
- regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
if (ret == -EOVERFLOW)
@@ -2326,8 +2319,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
file, blocks, le32_to_cpu(blk->len),
reg);
- ret = regmap_raw_write_async(regmap, reg, buf->buf,
- le32_to_cpu(blk->len));
+ ret = regmap_raw_write(regmap, reg, buf->buf,
+ le32_to_cpu(blk->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write to %x in %s: %d\n",
@@ -2339,10 +2332,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
blocks++;
}
- ret = regmap_async_complete(regmap);
- if (ret != 0)
- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
-
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, blocks, pos - firmware->size);
@@ -2350,7 +2339,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_debugfs_save_binname(dsp, file);
out_fw:
- regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
if (ret == -EOVERFLOW)
@@ -2561,8 +2549,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
{
int ret;
- ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
- ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, ADSP2_SYS_ENA);
if (ret != 0)
return ret;
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index fa9c1c3bf168..f0a63d09d3c4 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -311,7 +311,7 @@ void cper_print_proc_arm(const char *pfx,
ctx_info = (struct cper_arm_ctx_info *)err_info;
max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
for (i = 0; i < proc->context_info_num; i++) {
- int size = sizeof(*ctx_info) + ctx_info->size;
+ int size = ALIGN(sizeof(*ctx_info) + ctx_info->size, 16);
printk("%sContext info structure %d:\n", pfx, i);
if (len < size) {
diff --git a/drivers/firmware/efi/cper-x86.c b/drivers/firmware/efi/cper-x86.c
index 438ed9eff6d0..3949d7b5e808 100644
--- a/drivers/firmware/efi/cper-x86.c
+++ b/drivers/firmware/efi/cper-x86.c
@@ -325,7 +325,7 @@ void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)
ctx_info = (struct cper_ia_proc_ctx *)err_info;
for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
- int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
+ int size = ALIGN(sizeof(*ctx_info) + ctx_info->reg_arr_size, 16);
int groupsize = 4;
printk("%sContext Information Structure %d:\n", pfx, i);
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index 5ed0602c2f75..208db29613c6 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -99,14 +99,13 @@ static struct kobject *mokvar_kobj;
*/
void __init efi_mokvar_table_init(void)
{
+ struct efi_mokvar_table_entry __aligned(1) *mokvar_entry, *next_entry;
efi_memory_desc_t md;
void *va = NULL;
unsigned long cur_offset = 0;
unsigned long offset_limit;
- unsigned long map_size = 0;
unsigned long map_size_needed = 0;
unsigned long size;
- struct efi_mokvar_table_entry *mokvar_entry;
int err;
if (!efi_enabled(EFI_MEMMAP))
@@ -134,48 +133,46 @@ void __init efi_mokvar_table_init(void)
*/
err = -EINVAL;
while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
- mokvar_entry = va + cur_offset;
- map_size_needed = cur_offset + sizeof(*mokvar_entry);
- if (map_size_needed > map_size) {
- if (va)
- early_memunmap(va, map_size);
- /*
- * Map a little more than the fixed size entry
- * header, anticipating some data. It's safe to
- * do so as long as we stay within current memory
- * descriptor.
- */
- map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
- offset_limit);
- va = early_memremap(efi.mokvar_table, map_size);
- if (!va) {
- pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
- efi.mokvar_table, map_size);
- return;
- }
- mokvar_entry = va + cur_offset;
+ if (va)
+ early_memunmap(va, sizeof(*mokvar_entry));
+ va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
+ if (!va) {
+ pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n",
+ efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
+ return;
}
-
+ mokvar_entry = va;
+next:
/* Check for last sentinel entry */
if (mokvar_entry->name[0] == '\0') {
if (mokvar_entry->data_size != 0)
break;
err = 0;
+ map_size_needed = cur_offset + sizeof(*mokvar_entry);
break;
}
- /* Sanity check that the name is null terminated */
- size = strnlen(mokvar_entry->name,
- sizeof(mokvar_entry->name));
- if (size >= sizeof(mokvar_entry->name))
- break;
+ /* Enforce that the name is NUL terminated */
+ mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0';
/* Advance to the next entry */
- cur_offset = map_size_needed + mokvar_entry->data_size;
+ size = sizeof(*mokvar_entry) + mokvar_entry->data_size;
+ cur_offset += size;
+
+ /*
+ * Don't bother remapping if the current entry header and the
+ * next one end on the same page.
+ */
+ next_entry = (void *)((unsigned long)mokvar_entry + size);
+ if (((((unsigned long)(mokvar_entry + 1) - 1) ^
+ ((unsigned long)(next_entry + 1) - 1)) & PAGE_MASK) == 0) {
+ mokvar_entry = next_entry;
+ goto next;
+ }
}
if (va)
- early_memunmap(va, map_size);
+ early_memunmap(va, sizeof(*mokvar_entry));
if (err) {
pr_err("EFI MOKvar config table is not valid\n");
return;
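The page-crossing test above relies on the fact that two addresses share a page exactly when their page-number bits match, so XOR-ing the last byte of each entry and masking with PAGE_MASK yields zero. A standalone sketch (4 KiB pages assumed):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	/* Two addresses sit on the same page iff their page-number bits
	 * match, i.e. ((a ^ b) & PAGE_MASK) == 0. */
	static int same_page(unsigned long a, unsigned long b)
	{
		return ((a ^ b) & PAGE_MASK) == 0;
	}

	int main(void)
	{
		printf("%d\n", same_page(0x1000UL, 0x1fffUL));	/* 1 */
		printf("%d\n", same_page(0x1fffUL, 0x2000UL));	/* 0, crosses */
		return 0;
	}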
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d100bb7a137c..018dfccd771b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1638,6 +1638,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ /* resizing on Dell G5 SE platforms causes problems with runtime pm */
+ if ((amdgpu_runtime_pm != 0) &&
+ adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
+ adev->pdev->device == 0x731f &&
+ adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+ return 0;
+
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
DRM_WARN("System can't access extended configuration space, please check!!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 784b03abb3a4..c1f35ded684e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1638,22 +1638,19 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
}
mutex_lock(&adev->enforce_isolation_mutex);
-
for (i = 0; i < num_partitions; i++) {
- if (adev->enforce_isolation[i] && !partition_values[i]) {
+ if (adev->enforce_isolation[i] && !partition_values[i])
/* Going from enabled to disabled */
amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
- amdgpu_mes_set_enforce_isolation(adev, i, false);
- } else if (!adev->enforce_isolation[i] && partition_values[i]) {
+ else if (!adev->enforce_isolation[i] && partition_values[i])
/* Going from disabled to enabled */
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- amdgpu_mes_set_enforce_isolation(adev, i, true);
- }
adev->enforce_isolation[i] = partition_values[i];
}
-
mutex_unlock(&adev->enforce_isolation_mutex);
+ amdgpu_mes_update_enforce_isolation(adev);
+
return count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 32b27a1658e7..709c11cbeabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1681,7 +1681,8 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
}
/* Fix me -- node_id is used to identify the correct MES instances in the future */
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
+static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
+ uint32_t node_id, bool enable)
{
struct mes_misc_op_input op_input = {0};
int r;
@@ -1703,6 +1704,23 @@ error:
return r;
}
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
+ mutex_lock(&adev->enforce_isolation_mutex);
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ if (adev->enforce_isolation[i])
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
+ else
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
+ return r;
+}
+
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 2df2444ee892..e98ea7ede1ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -534,6 +534,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);
#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 01ae2f88dec8..262bd010a283 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2281,7 +2281,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor cursor;
u64 addr;
- int r;
+ int r = 0;
if (!adev->mman.buffer_funcs_enabled)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 65f389eb65e5..f9a4d08eef92 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1633,6 +1633,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
+ r = amdgpu_mes_update_enforce_isolation(adev);
+ if (r)
+ goto failure;
+
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 901e924e69ad..0fd0fa6ed518 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -1743,6 +1743,10 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
+ r = amdgpu_mes_update_enforce_isolation(adev);
+ if (r)
+ goto failure;
+
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 2eff37aaf827..1695dd78ede8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 68dbc0399c87..3c0ae28c5923 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
index 2b72d5b4949b..565858b9044d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
@@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index ff417d5361c4..3014925d95ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -183,6 +183,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -245,7 +248,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ac3fd81fecef..9d9645a2d18e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1618,75 +1618,130 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
-static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+struct amdgpu_dm_quirks {
+ bool aux_hpd_discon;
+ bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+ .aux_hpd_discon = false,
+ .support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.support_edp0_on_dp1 = true;
+ return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.aux_hpd_discon = true;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
},
},
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ },
+ },
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
{
- const struct dmi_system_id *dmi_id;
+ int dmi_id;
+ struct drm_device *dev = dm->ddev;
dm->aux_hpd_discon_quirk = false;
+ init_data->flags.support_edp0_on_dp1 = false;
+
+ dmi_id = dmi_check_system(dmi_quirk_table);
+
+ if (!dmi_id)
+ return;
- dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
- if (dmi_id) {
+ if (quirk_entries.aux_hpd_discon) {
dm->aux_hpd_discon_quirk = true;
- DRM_INFO("aux_hpd_discon_quirk attached\n");
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ }
+ if (quirk_entries.support_edp0_on_dp1) {
+ init_data->flags.support_edp0_on_dp1 = true;
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
}
}
@@ -1994,7 +2049,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
- retrieve_dmi_info(&adev->dm);
+ retrieve_dmi_info(&adev->dm, &init_data);
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -7240,8 +7295,14 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
- drm_edid = drm_edid_read(connector);
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
@@ -7286,14 +7347,21 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
+ struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
const struct drm_edid *drm_edid;
const struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
- drm_edid = drm_edid_read(connector);
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 3390f0d8420a..c4a7fd453e5f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -894,6 +894,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int i;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -920,6 +921,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ /* Update reference counts for HPDs */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+ drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
+ }
}
/**
@@ -935,6 +942,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int i;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -960,4 +968,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ /* Update reference counts for HPDs */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+ drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
+ }
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 45858bf1523d..e140b7a04d72 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
return false;
- return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
+ /* Temporarily disable PSR-SU to avoid glitches */
+ return false;
}
/*
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 67a8e22b1126..e237ea1185a7 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3042,6 +3042,7 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
@@ -3049,6 +3050,8 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -3066,32 +3069,42 @@ static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
return 0;
}
static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
/* asic init will reset to the boot state */
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+ return ret;
}
static bool kv_dpm_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index e861355ebd75..c7518b13e787 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int temp, size = sizeof(temp);
- if (!adev->pm.dpm_enabled)
- return;
+ mutex_lock(&adev->pm.mutex);
+ if (!adev->pm.dpm_enabled) {
+ mutex_unlock(&adev->pm.mutex);
+ return;
+ }
if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp,
@@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
adev->pm.dpm.state = dpm_state;
amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index a87dcf0974bc..d6dfe2599ebe 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7786,6 +7786,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
@@ -7793,6 +7794,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
return ret;
}
@@ -7810,32 +7812,44 @@ static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
+
return 0;
}
static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+
+ return ret;
}
static bool si_dpm_is_idle(void *handle)
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index b14b581c059d..02a516e77192 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT
#include <linux/fb.h>
+#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -70,37 +71,102 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area);
-static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+ void *shadow = info->screen_buffer;
+
+ if (!fb_helper->dev)
+ return;
- if (!dma->map_noncoherent)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ if (info->fbdefio)
+ fb_deferred_io_cleanup(info);
+ drm_fb_helper_fini(fb_helper);
+ vfree(shadow);
- return fb_deferred_io_mmap(info, vma);
+ drm_client_buffer_vunmap(fb_helper->buffer);
+ drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
-static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
+static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
- __FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
+ FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
DRM_FB_HELPER_DEFAULT_OPS,
- __FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
- .fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
- .fb_destroy = drm_fbdev_dma_fb_destroy,
+ .fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
/*
* struct drm_fb_helper
*/
+static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip,
+ struct iosys_map *dst)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ size_t offset = clip->y1 * fb->pitches[0];
+ size_t len = clip->x2 - clip->x1;
+ unsigned int y;
+ void *src;
+
+ switch (drm_format_info_bpp(fb->format, 0)) {
+ case 1:
+ offset += clip->x1 / 8;
+ len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
+ break;
+ case 2:
+ offset += clip->x1 / 4;
+ len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
+ break;
+ case 4:
+ offset += clip->x1 / 2;
+ len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
+ break;
+ default:
+ offset += clip->x1 * fb->format->cpp[0];
+ len *= fb->format->cpp[0];
+ break;
+ }
+
+ src = fb_helper->info->screen_buffer + offset;
+ iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ iosys_map_memcpy_to(dst, 0, src, len);
+ iosys_map_incr(dst, fb->pitches[0]);
+ src += fb->pitches[0];
+ }
+}
+
+static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct iosys_map dst;
+
+ /*
+ * For fbdev emulation, we only have to protect against fbdev modeset
+ * operations. Nothing else will involve the client buffer's BO. So it
+ * is sufficient to acquire struct drm_fb_helper.lock here.
+ */
+ mutex_lock(&fb_helper->lock);
+
+ dst = buffer->map;
+ drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
+
+ mutex_unlock(&fb_helper->lock);
+
+ return 0;
+}
static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
@@ -112,6 +178,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
return 0;
if (helper->fb->funcs->dirty) {
+ ret = drm_fbdev_dma_damage_blit(helper, clip);
+ if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+ return ret;
+
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
@@ -128,14 +198,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
* struct drm_fb_helper
*/
+static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct fb_info *info = fb_helper->info;
+ struct iosys_map map = buffer->map;
+
+ info->fbops = &drm_fbdev_dma_fb_ops;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ if (dma_obj->map_noncoherent)
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_size = sizes->surface_height * fb->pitches[0];
+ info->screen_buffer = map.vaddr;
+ if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+ if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ }
+ info->fix.smem_len = info->screen_size;
+
+ return 0;
+}
+
+static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct fb_info *info = fb_helper->info;
+ size_t screen_size = buffer->gem->size;
+ void *screen_buffer;
+ int ret;
+
+ /*
+ * Deferred I/O requires struct page for framebuffer memory,
+ * which is not guaranteed for all DMA ranges. We thus create
+ * a shadow buffer in system memory.
+ */
+ screen_buffer = vzalloc(screen_size);
+ if (!screen_buffer)
+ return -ENOMEM;
+
+ info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_buffer = screen_buffer;
+ info->fix.smem_len = screen_size;
+
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_vfree;
+
+ return 0;
+
+err_vfree:
+ vfree(screen_buffer);
+ return ret;
+}
+
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
- bool use_deferred_io = false;
struct drm_client_buffer *buffer;
- struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
struct fb_info *info;
u32 format;
@@ -152,19 +288,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- dma_obj = to_drm_gem_dma_obj(buffer->gem);
fb = buffer->fb;
- /*
- * Deferred I/O requires struct page for framebuffer memory,
- * which is not guaranteed for all DMA ranges. We thus only
- * install deferred I/O if we have a framebuffer that requires
- * it.
- */
- if (fb->funcs->dirty)
- use_deferred_io = true;
-
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
@@ -185,45 +311,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
- if (use_deferred_io)
- info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+ if (fb->funcs->dirty)
+ ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
else
- info->fbops = &drm_fbdev_dma_fb_ops;
-
- /* screen */
- info->flags |= FBINFO_VIRTFB; /* system memory */
- if (dma_obj->map_noncoherent)
- info->flags |= FBINFO_READS_FAST; /* signal caching */
- info->screen_size = sizes->surface_height * fb->pitches[0];
- info->screen_buffer = map.vaddr;
- if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
- if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
- info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
- }
- info->fix.smem_len = info->screen_size;
-
- /*
- * Only set up deferred I/O if the screen buffer supports
- * it. If this disagrees with the previous test for ->dirty,
- * mmap on the /dev/fb file might not work correctly.
- */
- if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
- unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
-
- if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
- use_deferred_io = false;
- }
-
- /* deferred I/O */
- if (use_deferred_io) {
- fb_helper->fbdefio.delay = HZ / 20;
- fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
-
- info->fbdefio = &fb_helper->fbdefio;
- ret = fb_deferred_io_init(info);
- if (ret)
- goto err_drm_fb_helper_release_info;
- }
+ ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
+ if (ret)
+ goto err_drm_fb_helper_release_info;
return 0;
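For the sub-byte formats handled in the blitter above, the clip rectangle's x-range has to be converted into a byte range: at 1 bpp eight pixels share a byte, so the start offset divides down and the length rounds up. A worked example (the clip values are hypothetical):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Hypothetical 1 bpp clip: pixels 10..25 on one scanline
		 * (x2 is exclusive). Eight pixels share each byte. */
		unsigned int x1 = 10, x2 = 26;
		unsigned int len = x2 - x1;

		unsigned int byte_off = x1 / 8;			    /* 1 */
		unsigned int bytes = DIV_ROUND_UP(len + x1 % 8, 8); /* 3 */

		printf("offset=%u bytes=%u\n", byte_off, bytes);
		return 0;
	}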
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 18c66992aa1d..ff2cf3daa7a2 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -866,7 +866,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && dp128b132b_pipe_mask) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/*
* If we don't have 8b/10b MST, but have more than one
@@ -878,7 +878,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
* we don't expect MST to have been enabled at that point, and
* can assume it's SST.
*/
- if (hweight8(dp128b132b_pipe_mask) > 1 || intel_dp->is_mst)
+ if (hweight8(dp128b132b_pipe_mask) > 1 ||
+ intel_dp_mst_encoder_active_links(dig_port))
mst_pipe_mask = dp128b132b_pipe_mask;
}
@@ -4151,13 +4152,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/*
* If this is true, we know we're being called from mst stream
* encoder's ->get_config().
*/
- if (intel_dp->is_mst)
+ if (intel_dp_mst_encoder_active_links(dig_port))
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
index 9bc6a3884c22..3d9d4d40fb80 100644
--- a/drivers/gpu/drm/imagination/Makefile
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only OR MIT
# Copyright (c) 2023 Imagination Technologies Ltd.
-subdir-ccflags-y := -I$(src)
-
powervr-y := \
pvr_ccb.o \
pvr_cccb.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8d5c9c74cbb9..eac0d1d2dbda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -775,7 +775,6 @@ nouveau_connector_force(struct drm_connector *connector)
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
connector->name);
- connector->status = connector_status_disconnected;
return;
}
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index b20ac1705726..fa269d279e25 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -67,7 +67,7 @@ static u16 lerp_u16(u16 a, u16 b, s64 t)
s64 delta = drm_fixp_mul(b_fp - a_fp, t);
- return drm_fixp2int(a_fp + delta);
+ return drm_fixp2int_round(a_fp + delta);
}
static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
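drm_fixp2int() discards the fractional part, which biases every interpolated LUT sample downward by up to one unit; drm_fixp2int_round() adds half a unit first. A standalone 32.32 fixed-point sketch of the difference:

	#include <stdio.h>
	#include <stdint.h>

	/* 32.32 fixed point, mirroring the drm_fixed helpers: truncation
	 * drops the fraction, rounding adds half a unit first. */
	#define FP_SHIFT 32
	#define FP_HALF  (1LL << (FP_SHIFT - 1))

	static int64_t fixp2int(int64_t a)	 { return a >> FP_SHIFT; }
	static int64_t fixp2int_round(int64_t a) { return (a + FP_HALF) >> FP_SHIFT; }

	int main(void)
	{
		int64_t v = (10LL << FP_SHIFT) + (3LL << (FP_SHIFT - 2)); /* 10.75 */

		printf("trunc=%lld round=%lld\n",
		       (long long)fixp2int(v), (long long)fixp2int_round(v));
		return 0;	/* prints trunc=10 round=11 */
	}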
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index d86219dedde2..b732c89816df 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -53,7 +53,6 @@
#define RING_CTL(base) XE_REG((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_START_UDW(base) XE_REG((base) + 0x48)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 913c74d6e2ae..b6a2dd742ebd 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1248,6 +1248,8 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
+ /* Confirm no work left behind accessing device structures */
+ cancel_delayed_work_sync(&ge->sched.base.work_tdr);
release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index fa873f3d0a9d..eb6cd91e1e22 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1689,7 +1689,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
stream->sample = param->sample;
- stream->periodic = param->period_exponent > 0;
+ stream->periodic = param->period_exponent >= 0;
stream->period_exponent = param->period_exponent;
stream->no_preempt = param->no_preempt;
stream->wait_num_reports = param->wait_num_reports;
@@ -1970,6 +1970,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
}
param.xef = xef;
+ param.period_exponent = -1;
ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
if (ret)
return ret;
@@ -2024,7 +2025,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
goto err_exec_q;
}
- if (param.period_exponent > 0) {
+ if (param.period_exponent >= 0) {
u64 oa_period, oa_freq_hz;
/* Requesting samples from OAG buffer is a privileged operation */
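Both xe_oa hunks exist because 0 is a valid (fastest) period exponent, so "not requested" needs a sentinel outside the valid range: initialize to -1 and test with >= 0. In miniature:

	#include <stdio.h>

	int main(void)
	{
		int period_exponent = -1;	/* default: sampling not requested */

		/* ...extension parsing may set any value >= 0, including 0... */
		period_exponent = 0;

		if (period_exponent >= 0)
			printf("periodic sampling, exponent %d\n", period_exponent);
		return 0;
	}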
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 690330352d4c..30259eba450b 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -666,20 +666,33 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
/* Collect invalidated userptrs */
spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
list_del_init(&uvma->userptr.invalidate_link);
- list_move_tail(&uvma->userptr.repin_link,
- &vm->userptr.repin_list);
+ list_add_tail(&uvma->userptr.repin_link,
+ &vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);
- /* Pin and move to temporary list */
+ /* Pin and move to bind list */
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
if (err == -EFAULT) {
list_del_init(&uvma->userptr.repin_link);
+ /*
+ * We might have already done the pin once, but then had
+ * to retry before the re-bind happened, due to some other
+ * condition in the caller. In the meantime the userptr got
+ * dinged by the notifier such that we need to revalidate
+ * here, but this time we hit the EFAULT. In such a case
+ * make sure we remove ourselves from the rebind list to
+ * avoid going down in flames.
+ */
+ if (!list_empty(&uvma->vma.combined_links.rebind))
+ list_del_init(&uvma->vma.combined_links.rebind);
/* Wait for pending binds */
xe_vm_lock(vm, false);
@@ -690,10 +703,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
err = xe_vm_invalidate_vma(&uvma->vma);
xe_vm_unlock(vm);
if (err)
- return err;
+ break;
} else {
- if (err < 0)
- return err;
+ if (err)
+ break;
list_del_init(&uvma->userptr.repin_link);
list_move_tail(&uvma->vma.combined_links.rebind,
@@ -701,7 +714,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
}
}
- return 0;
+ if (err) {
+ down_write(&vm->userptr.notifier_lock);
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+ up_write(&vm->userptr.notifier_lock);
+ }
+ return err;
}
/**
@@ -1066,6 +1091,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
} else if (!xe_vma_is_null(vma)) {
diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c
index 7512614bf4b7..93ebec162c6d 100644
--- a/drivers/i2c/busses/i2c-amd-asf-plat.c
+++ b/drivers/i2c/busses/i2c-amd-asf-plat.c
@@ -293,6 +293,7 @@ static irqreturn_t amd_asf_irq_handler(int irq, void *ptr)
amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true);
}
+ iowrite32(irq, dev->eoi_base);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-ls2x.c b/drivers/i2c/busses/i2c-ls2x.c
index 8821cac3897b..b475dd27b7af 100644
--- a/drivers/i2c/busses/i2c-ls2x.c
+++ b/drivers/i2c/busses/i2c-ls2x.c
@@ -10,6 +10,7 @@
* Rewritten for mainline by Binbin Zhou <zhoubinbin@loongson.cn>
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
@@ -26,7 +27,8 @@
#include <linux/units.h>
/* I2C Registers */
-#define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */
+#define I2C_LS2X_PRER_LO 0x0 /* Freq Division Low Byte Register */
+#define I2C_LS2X_PRER_HI 0x1 /* Freq Division High Byte Register */
#define I2C_LS2X_CTR 0x2 /* Control Register */
#define I2C_LS2X_TXR 0x3 /* Transport Data Register */
#define I2C_LS2X_RXR 0x3 /* Receive Data Register */
@@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id)
*/
static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
{
+ u16 val;
struct i2c_timings *t = &priv->i2c_t;
struct device *dev = priv->adapter.dev.parent;
u32 acpi_speed = i2c_acpi_find_bus_speed(dev);
@@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
else
t->bus_freq_hz = LS2X_I2C_FREQ_STD;
- /* Calculate and set i2c frequency. */
- writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1,
- priv->base + I2C_LS2X_PRER);
+ /*
+ * According to the chip manual, we can only access the registers as bytes,
+ * otherwise the high bits will be truncated.
+ * So set the I2C frequency with two sequential writeb() calls instead of writew().
+ */
+ val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1;
+ writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO);
+ writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI);
}
static void ls2x_i2c_init(struct ls2x_i2c_priv *priv)
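The divider itself is still 16 bits wide; only the register access width changes. A standalone sketch of the split, with hypothetical clock and bus rates:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t pclk = 50000000;	/* hypothetical APB clock, 50 MHz */
		uint32_t bus = 400000;		/* Fast-mode I2C */
		uint16_t val = pclk / (5 * bus) - 1;

		uint8_t lo = val & 0xff;	/* written to PRER_LO */
		uint8_t hi = val >> 8;		/* written to PRER_HI */

		printf("val=0x%04x lo=0x%02x hi=0x%02x\n",
		       (unsigned)val, (unsigned)lo, (unsigned)hi);
		return 0;
	}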
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 3ca08b8ef8af..de713b5747fe 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2554,6 +2554,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
if (irq < 0)
return irq;
+ /*
+ * Disable the interrupt to avoid the interrupt handler being triggered
+ * incorrectly by the asynchronous interrupt status since the machine
+ * might do a warm reset during the last smbus/i2c transfer session.
+ */
+ npcm_i2c_int_enable(bus, false);
+
ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
dev_name(bus->dev), bus);
if (ret)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 118fe1d37c22..0fdb1d1316c4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -56,6 +56,7 @@
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
+#include <asm/tsc.h>
#include <asm/fpu/api.h>
#define INTEL_IDLE_VERSION "0.5.1"
@@ -1799,6 +1800,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
if (intel_idle_state_needs_timer_stop(state))
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle");
+
state->enter = intel_idle;
state->enter_s2idle = intel_idle_s2idle;
}
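X86_FEATURE_NONSTOP_TSC corresponds to the invariant-TSC CPUID bit (CPUID.80000007H:EDX[8]); without it the TSC halts in C-states deeper than C1, so the clocksource has to be demoted as done above. The same check, sketched as standalone C for illustration:

	#include <cpuid.h>
	#include <stdbool.h>

	/* Detect invariant TSC: extended leaf 0x80000007, EDX bit 8. */
	static bool has_nonstop_tsc(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
			return false;
		return edx & (1u << 8);
	}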
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index b91a85a491d0..3721446c6ba4 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -187,7 +187,6 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
struct auxiliary_device *adev;
- struct notifier_block nb;
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 3ac47f4e6122..f039aefcaf67 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -348,8 +348,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
bnxt_re_copy_err_stats(rdev, stats, err_s);
- if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags) &&
- !rdev->is_virtfn) {
+ if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
+ rdev->is_virtfn)) {
rc = bnxt_re_get_ext_stat(rdev, stats);
if (rc) {
clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2de101d6e825..6f5db32082dd 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1870,6 +1870,8 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
srq->srq_limit = srq_init_attr->attr.srq_limit;
srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
+ srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
+ srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
nq = &rdev->nqr->nq[0];
if (udata) {
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index e9e4da4dd576..a94c8c5387d9 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -396,11 +396,16 @@ free_dcb:
static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
{
- struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
struct bnxt_re_dcb_work *dcb_work;
+ struct bnxt_re_dev *rdev;
u32 data1, data2;
u16 event_id;
+ rdev = en_info->rdev;
+ if (!rdev)
+ return;
+
event_id = le16_to_cpu(cmpl->event_id);
data1 = le32_to_cpu(cmpl->event_data1);
data2 = le32_to_cpu(cmpl->event_data2);
@@ -433,6 +438,8 @@ static void bnxt_re_stop_irq(void *handle, bool reset)
int indx;
rdev = en_info->rdev;
+ if (!rdev)
+ return;
rcfw = &rdev->rcfw;
if (reset) {
@@ -461,6 +468,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
int indx, rc;
rdev = en_info->rdev;
+ if (!rdev)
+ return;
msix_ent = rdev->nqr->msix_entries;
rcfw = &rdev->rcfw;
if (!ent) {
@@ -1350,7 +1359,6 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev,
return NULL;
}
/* Default values */
- rdev->nb.notifier_call = NULL;
rdev->netdev = en_dev->net;
rdev->en_dev = en_dev;
rdev->adev = adev;
@@ -2345,15 +2353,6 @@ exit:
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
struct auxiliary_device *aux_dev)
{
- if (rdev->nb.notifier_call) {
- unregister_netdevice_notifier(&rdev->nb);
- rdev->nb.notifier_call = NULL;
- } else {
- /* If notifier is null, we should have already done a
- * clean up before coming here.
- */
- return;
- }
bnxt_re_setup_cc(rdev, false);
ib_unregister_device(&rdev->ibdev);
bnxt_re_dev_uninit(rdev, op_type);
@@ -2433,6 +2432,7 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx",
__func__, en_dev->en_state);
bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
+ bnxt_re_update_en_info_rdev(NULL, en_info, adev);
mutex_unlock(&bnxt_re_mutex);
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index be5d907a036b..711990232de1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -547,6 +547,14 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}
+static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
+ u16 flags, bool virtfn)
+{
+ /* Ext stats are supported if the cap flag is set AND it is a PF or a Thor2 VF */
+ return (_is_ext_stats_supported(flags) &&
+ ((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn)));
+}
+
static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
{
return dev_cap_flags &
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index dded339802b3..160e8927d364 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1286,10 +1286,8 @@ static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
return tx_timeout;
}
-static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
+static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
u32 timeout = 0;
do {
@@ -1299,8 +1297,9 @@ static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
} while (++timeout < tx_timeout);
}
-static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmq_desc *desc, int num)
+static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc,
+ int num, u32 tx_timeout)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
@@ -1309,8 +1308,6 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
int ret;
int i;
- spin_lock_bh(&csq->lock);
-
tail = csq->head;
for (i = 0; i < num; i++) {
@@ -1324,22 +1321,17 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
- hns_roce_wait_csq_done(hr_dev, le16_to_cpu(desc->opcode));
+ hns_roce_wait_csq_done(hr_dev, tx_timeout);
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
/* check the result of hardware write back */
- desc[i] = csq->desc[tail++];
+ desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
tail = 0;
-
- desc_ret = le16_to_cpu(desc[i].retval);
if (likely(desc_ret == CMD_EXEC_SUCCESS))
continue;
- dev_err_ratelimited(hr_dev->dev,
- "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
- desc->opcode, desc_ret);
ret = hns_roce_cmd_err_convert_errno(desc_ret);
}
} else {
@@ -1354,14 +1346,54 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
ret = -EAGAIN;
}
- spin_unlock_bh(&csq->lock);
-
if (ret)
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);
return ret;
}
+static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+ u16 opcode = le16_to_cpu(desc->opcode);
+ u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
+ u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT;
+ u32 rsv_tail;
+ int ret;
+ int i;
+
+ while (try_cnt) {
+ try_cnt--;
+
+ spin_lock_bh(&csq->lock);
+ rsv_tail = csq->head;
+ ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout);
+ if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME &&
+ try_cnt) {
+ spin_unlock_bh(&csq->lock);
+ mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC);
+ continue;
+ }
+
+ for (i = 0; i < num; i++) {
+ desc[i] = csq->desc[rsv_tail++];
+ if (rsv_tail == csq->desc_num)
+ rsv_tail = 0;
+ }
+ spin_unlock_bh(&csq->lock);
+ break;
+ }
+
+ if (ret)
+ dev_err_ratelimited(hr_dev->dev,
+ "Cmdq IO error, opcode = 0x%x, return = %d.\n",
+ opcode, ret);
+
+ return ret;
+}
+
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index cbdbc9edbce6..91a5665465ff 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -230,6 +230,8 @@ enum hns_roce_opcode_type {
};
#define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000
+#define HNS_ROCE_OPC_POST_MB_TRY_CNT 8
+#define HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC 5
struct hns_roce_cmdq_tx_timeout_map {
u16 opcode;
u32 tx_timeout;
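Assuming the CSQ poll in hns_roce_wait_csq_done() advances in 1 us steps, these constants bound a POST_MB command at eight attempts: roughly 8 * 35 ms of polling plus 7 * 5 ms of retry gaps, about 315 ms in total, before -ETIME is finally reported to the caller.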
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 67c2d43135a8..457cea6d9909 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
req.num_resources = 1;
- req.alignment = 1;
+ req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;
/* Have GDMA start searching from 0 */
req.allocated_resources = 0;
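GDMA expresses resource alignment in MANA_PAGE_SIZE units (4 KiB hardware pages), so the request now asks for kernel-page alignment rather than any 4 KiB slot: on a 64 KiB-page arm64 kernel that is 65536 / 4096 = 16 units, while on 4 KiB-page builds the expression degenerates to the previous value of 1.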
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 505bc47fd575..99036afb3aef 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -67,7 +67,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.tclass = grh->traffic_class;
}
- ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
+ ah->av.stat_rate_sl =
+ (mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (init_attr->xmit_slave)
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 4f6c1968a2ee..81cfa74147a1 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -546,6 +546,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ bool new = false;
int err;
if (!counter->id) {
@@ -560,6 +561,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return err;
counter->id =
MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ new = true;
}
err = mlx5_ib_qp_set_counter(qp, counter);
@@ -569,8 +571,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return 0;
fail_set_counter:
- mlx5_ib_counter_dealloc(counter);
- counter->id = 0;
+ if (new) {
+ mlx5_ib_counter_dealloc(counter);
+ counter->id = 0;
+ }
return err;
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bb02b6adbf2c..753faa9ad06a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1550,7 +1550,7 @@ static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
- if (!umem_dmabuf->sgt)
+ if (!umem_dmabuf->sgt || !mr)
return;
mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
@@ -1935,7 +1935,8 @@ err:
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
- if (!mr->umem && !mr->data_direct && mr->descs) {
+ if (!mr->umem && !mr->data_direct &&
+ mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size;
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -2022,11 +2023,16 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
bool is_odp = is_odp_mr(mr);
+ bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
+ !to_ib_umem_dmabuf(mr->umem)->pinned;
int ret = 0;
if (is_odp)
mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ if (is_odp_dma_buf)
+ dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL);
+
if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
ent = mr->mmkey.cache_ent;
/* upon storing to a clean temp entry - schedule its cleanup */
@@ -2054,6 +2060,12 @@ out:
mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
}
+ if (is_odp_dma_buf) {
+ if (!ret)
+ to_ib_umem_dmabuf(mr->umem)->private = NULL;
+ dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ }
+
return ret;
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index f1e23583e6c0..e77c9280c07e 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -242,6 +242,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
mr) {
xa_unlock(&imr->implicit_children);
+ mlx5r_deref_odp_mkey(&imr->mmkey);
return;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a43eba9d3572..88724d15705d 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3447,11 +3447,11 @@ static int ib_to_mlx5_rate_map(u8 rate)
return 0;
}
-static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate)
{
u32 stat_rate_support;
- if (rate == IB_RATE_PORT_CURRENT)
+ if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS)
return 0;
if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS)
@@ -3596,7 +3596,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
sizeof(grh->dgid.raw));
}
- err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
+ err = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah));
if (err < 0)
return err;
MLX5_SET(ads, path, stat_rate, err);
@@ -4579,6 +4579,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
MLX5_SET(dctc, dctc, counter_set_id, set_id);
+
+ qp->port = attr->port_num;
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
@@ -5074,7 +5076,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
}
if (qp_attr_mask & IB_QP_PORT)
- qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+ qp_attr->port_num = mqp->port;
if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
if (qp_attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
index b6ee7c3ee1ca..2530e7730635 100644
--- a/drivers/infiniband/hw/mlx5/qp.h
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -56,4 +56,5 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
int mlx5_ib_qp_event_init(void);
void mlx5_ib_qp_event_cleanup(void);
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate);
#endif /* _MLX5_IB_QP_H */
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 887fd6fa3ba9..793f3c5c4d01 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -231,30 +231,6 @@ void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev)
ib_dealloc_pd(dev->umrc.pd);
}
-static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
-{
- struct umr_common *umrc = &dev->umrc;
- struct ib_qp_attr attr;
- int err;
-
- attr.qp_state = IB_QPS_RESET;
- err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
- if (err) {
- mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
- goto err;
- }
-
- err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
- if (err)
- goto err;
-
- umrc->state = MLX5_UMR_STATE_ACTIVE;
- return 0;
-
-err:
- umrc->state = MLX5_UMR_STATE_ERR;
- return err;
-}
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
struct mlx5r_umr_wqe *wqe, bool with_data)
@@ -302,6 +278,61 @@ out:
return err;
}
+static int mlx5r_umr_recover(struct mlx5_ib_dev *dev, u32 mkey,
+ struct mlx5r_umr_context *umr_context,
+ struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct ib_qp_attr attr;
+ int err;
+
+ mutex_lock(&umrc->lock);
+ /* Prevent any further WRs from being sent now */
+ if (umrc->state != MLX5_UMR_STATE_RECOVER) {
+ mlx5_ib_warn(dev, "UMR recovery encountered an unexpected state=%d\n",
+ umrc->state);
+ umrc->state = MLX5_UMR_STATE_RECOVER;
+ }
+ mutex_unlock(&umrc->lock);
+
+ /* Send a final/barrier WR (the failed one) and wait for its completion.
+ * This will ensure that all the previous WRs got a completion before
+ * we set the QP state to RESET.
+ */
+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context->cqe, wqe,
+ with_data);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR recovery post send failed, err %d\n", err);
+ goto err;
+ }
+
+ /* Since the QP is in an error state, it will only receive
+ * IB_WC_WR_FLUSH_ERR. However, as it serves only as a barrier
+ * we don't care about its status.
+ */
+ wait_for_completion(&umr_context->done);
+
+ attr.qp_state = IB_QPS_RESET;
+ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+ if (err) {
+ mlx5_ib_warn(dev, "Couldn't modify UMR QP to RESET, err=%d\n", err);
+ goto err;
+ }
+
+ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+ if (err) {
+ mlx5_ib_warn(dev, "Couldn't modify UMR QP to RTS, err=%d\n", err);
+ goto err;
+ }
+
+ umrc->state = MLX5_UMR_STATE_ACTIVE;
+ return 0;
+
+err:
+ umrc->state = MLX5_UMR_STATE_ERR;
+ return err;
+}
+
static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct mlx5_ib_umr_context *context =
@@ -366,9 +397,7 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
mlx5_ib_warn(dev,
"reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n",
umr_context.status, mkey);
- mutex_lock(&umrc->lock);
- err = mlx5r_umr_recover(dev);
- mutex_unlock(&umrc->lock);
+ err = mlx5r_umr_recover(dev, mkey, &umr_context, wqe, with_data);
if (err)
mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
err);
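The recovery leans on flush semantics: a QP in the error state completes every posted WR with IB_WC_WR_FLUSH_ERR, and completions are delivered in posting order, so waiting for the re-posted (failed) WR guarantees all earlier WRs have produced CQEs before the RESET transition. The ordering, as a sketch (post_wr() is shorthand for the post-send above):

	/* QP is in error state: every WR flushes, completions stay in order */
	post_wr(qp, barrier_wr);		/* the failed WR, re-posted */
	wait_for_completion(&umr_context.done);	/* earlier WRs are done too */
	/* only now: modify the QP to RESET, then back to RTS */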
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index b48a72bd7b23..cd5116d8c3b2 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2043,12 +2043,12 @@ static void set_dte_entry(struct amd_iommu *iommu,
make_clear_dte(dev_data, dte, &new);
if (domain->iop.mode != PAGE_MODE_NONE)
- new.data[0] = iommu_virt_to_phys(domain->iop.root);
+ new.data[0] |= iommu_virt_to_phys(domain->iop.root);
new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
- new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+ new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW;
/*
* When SNP is enabled, we can only support TV=1 with non-zero domain ID.
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 9f424acf474e..e540092d664d 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -2043,6 +2043,7 @@ int enable_drhd_fault_handling(unsigned int cpu)
/*
* Enable fault control interrupt.
*/
+ guard(rwsem_read)(&dmar_global_lock);
for_each_iommu(iommu, drhd) {
u32 fault_status;
int ret;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index cc46098f875b..bf1f0c814348 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3146,7 +3146,14 @@ int __init intel_iommu_init(void)
iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
+ /*
+ * The iommu device probe is protected by the iommu_probe_device_lock.
+ * Release the dmar_global_lock before entering the device probe path
+ * to avoid an unnecessary lock-order splat.
+ */
+ up_read(&dmar_global_lock);
iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
+ down_read(&dmar_global_lock);
iommu_pmu_register(iommu);
}
@@ -4378,9 +4385,6 @@ static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void *
{
struct device *dev = data;
- if (dev != &pdev->dev)
- return 0;
-
return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff);
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ee9f7cecd78e..c45464b6576a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3790,20 +3790,18 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE: {
- __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
-
- watermark_percentage += ic->journal_entries / 2;
- do_div(watermark_percentage, ic->journal_entries);
- arg_count = 3;
+ arg_count = 1; /* buffer_sectors */
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
arg_count += ic->reset_recalculate_flag;
arg_count += ic->discard;
- arg_count += ic->mode == 'J';
- arg_count += ic->mode == 'J';
- arg_count += ic->mode == 'B';
- arg_count += ic->mode == 'B';
+ arg_count += ic->mode != 'I'; /* interleave_sectors */
+ arg_count += ic->mode == 'J'; /* journal_sectors */
+ arg_count += ic->mode == 'J'; /* journal_watermark */
+ arg_count += ic->mode == 'J'; /* commit_time */
+ arg_count += ic->mode == 'B'; /* sectors_per_bit */
+ arg_count += ic->mode == 'B'; /* bitmap_flush_interval */
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
@@ -3822,10 +3820,15 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" reset_recalculate");
if (ic->discard)
DMEMIT(" allow_discards");
- DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
- DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
+ if (ic->mode != 'I')
+ DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
if (ic->mode == 'J') {
+ __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
+
+ watermark_percentage += ic->journal_entries / 2;
+ do_div(watermark_percentage, ic->journal_entries);
+ DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
DMEMIT(" commit_time:%u", ic->autocommit_msec);
}
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index b6f8e2dc7729..3f3d29af1be4 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -2178,6 +2178,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval);
vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval);
+ spin_lock_init(&zones->lock);
/*
* Since we will save up the timeouts that would have been reported but were ratelimited,
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 6989972eebc3..10687722d14c 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for Realtek RTL8366RB.
+config NET_DSA_REALTEK_RTL8366RB_LEDS
+ bool "Support RTL8366RB LED control"
+ depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
+ depends on NET_DSA_REALTEK_RTL8366RB
+ default NET_DSA_REALTEK_RTL8366RB
+
endif
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
index 35491dc20d6d..17367bcba496 100644
--- a/drivers/net/dsa/realtek/Makefile
+++ b/drivers/net/dsa/realtek/Makefile
@@ -12,4 +12,7 @@ endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
rtl8366-objs := rtl8366-core.o rtl8366rb.o
+ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS
+rtl8366-objs += rtl8366rb-leds.o
+endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
new file mode 100644
index 000000000000..99c890681ae6
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include "rtl83xx.h"
+#include "rtl8366rb.h"
+
+static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+{
+ switch (led_group) {
+ case 0:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 1:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 2:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 3:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ default:
+ return 0;
+ }
+}
+
+static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ &val);
+ if (ret) {
+ dev_err(priv->dev, "error reading LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+}
+
+static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ rtl8366rb_led_group_port_mask(led_group,
+ port_num),
+ enable ? 0xffff : 0);
+ if (ret) {
+ dev_err(priv->dev, "error updating LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ /* Change the LED group to manually controlled LEDs if required */
+ ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+ RTL8366RB_LEDGROUP_FORCE);
+
+ if (ret) {
+ dev_err(priv->dev, "error updating LED GROUP group %d\n",
+ led_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+ cdev);
+
+ return rb8366rb_set_port_led(led, brightness == LED_ON);
+}
+
+static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+ struct fwnode_handle *led_fwnode)
+{
+ struct rtl8366rb *rb = priv->chip_data;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct rtl8366rb_led *led;
+ u32 led_group;
+ int ret;
+
+ ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+ if (ret)
+ return ret;
+
+ if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_group, dp->index);
+ return -EINVAL;
+ }
+
+ led = &rb->leds[dp->index][led_group];
+ led->port_num = dp->index;
+ led->led_group = led_group;
+ led->priv = priv;
+
+ state = led_init_default_state_get(led_fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = 1;
+ rb8366rb_set_port_led(led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ led->cdev.brightness =
+ rb8366rb_get_port_led(led);
+ break;
+ case LEDS_DEFSTATE_OFF:
+ default:
+ led->cdev.brightness = 0;
+ rb8366rb_set_port_led(led, 0);
+ }
+
+ led->cdev.max_brightness = 1;
+ led->cdev.brightness_set_blocking =
+ rtl8366rb_cled_brightness_set_blocking;
+ init_data.fwnode = led_fwnode;
+ init_data.devname_mandatory = true;
+
+ init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+ dp->ds->index, dp->index, led_group);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+ if (ret) {
+ dev_warn(priv->dev, "Failed to init LED %d for port %d",
+ led_group, dp->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ struct device_node *leds_np;
+ struct dsa_port *dp;
+ int ret = 0;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->dn)
+ continue;
+
+ leds_np = of_get_child_by_name(dp->dn, "leds");
+ if (!leds_np) {
+ dev_dbg(priv->dev, "No leds defined for port %d",
+ dp->index);
+ continue;
+ }
+
+ for_each_child_of_node_scoped(leds_np, led_np) {
+ ret = rtl8366rb_setup_led(priv, dp,
+ of_fwnode_handle(led_np));
+ if (ret)
+ break;
+ }
+
+ of_node_put(leds_np);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 4c4a95d4380c..f54771cab56d 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -27,11 +27,7 @@
#include "realtek-smi.h"
#include "realtek-mdio.h"
#include "rtl83xx.h"
-
-#define RTL8366RB_PORT_NUM_CPU 5
-#define RTL8366RB_NUM_PORTS 6
-#define RTL8366RB_PHY_NO_MAX 4
-#define RTL8366RB_PHY_ADDR_MAX 31
+#include "rtl8366rb.h"
/* Switch Global Configuration register */
#define RTL8366RB_SGCR 0x0000
@@ -176,39 +172,6 @@
*/
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
-/* LED control registers */
-/* The LED blink rate is global; it is used by all triggers in all groups. */
-#define RTL8366RB_LED_BLINKRATE_REG 0x0430
-#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
-#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
-#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
-#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
-#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
-#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
-#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
-
-/* LED trigger event for each group */
-#define RTL8366RB_LED_CTRL_REG 0x0431
-#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
- (4 * (led_group))
-#define RTL8366RB_LED_CTRL_MASK(led_group) \
- (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
-
-/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
- * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
- * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
- */
-#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
-#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
-#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
- ((led_group) <= 1 ? \
- RTL8366RB_LED_0_1_CTRL_REG : \
- RTL8366RB_LED_2_3_CTRL_REG)
-#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
-#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
-#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
-#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
-
#define RTL8366RB_MIB_COUNT 33
#define RTL8366RB_GLOBAL_MIB_COUNT 1
#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
@@ -244,7 +207,6 @@
#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
#define RTL8366RB_NUM_VLANS 16
-#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
#define RTL8366RB_NUM_FIDS 8
@@ -351,46 +313,6 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
-enum rtl8366_ledgroup_mode {
- RTL8366RB_LEDGROUP_OFF = 0x0,
- RTL8366RB_LEDGROUP_DUP_COL = 0x1,
- RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
- RTL8366RB_LEDGROUP_SPD1000 = 0x3,
- RTL8366RB_LEDGROUP_SPD100 = 0x4,
- RTL8366RB_LEDGROUP_SPD10 = 0x5,
- RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
- RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
- RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
- RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
- RTL8366RB_LEDGROUP_FIBER = 0xa,
- RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
- RTL8366RB_LEDGROUP_LINK_RX = 0xc,
- RTL8366RB_LEDGROUP_LINK_TX = 0xd,
- RTL8366RB_LEDGROUP_MASTER = 0xe,
- RTL8366RB_LEDGROUP_FORCE = 0xf,
-
- __RTL8366RB_LEDGROUP_MODE_MAX
-};
-
-struct rtl8366rb_led {
- u8 port_num;
- u8 led_group;
- struct realtek_priv *priv;
- struct led_classdev cdev;
-};
-
-/**
- * struct rtl8366rb - RTL8366RB-specific data
- * @max_mtu: per-port max MTU setting
- * @pvid_enabled: if PVID is set for respective port
- * @leds: per-port and per-ledgroup led info
- */
-struct rtl8366rb {
- unsigned int max_mtu[RTL8366RB_NUM_PORTS];
- bool pvid_enabled[RTL8366RB_NUM_PORTS];
- struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
-};
-
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 4, "EtherStatsOctets" },
@@ -831,9 +753,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
return 0;
}
-static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
- u8 led_group,
- enum rtl8366_ledgroup_mode mode)
+/* This code is also used with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode)
{
int ret;
u32 val;
@@ -850,144 +773,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
return 0;
}
-static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
-{
- switch (led_group) {
- case 0:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 1:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 2:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 3:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- default:
- return 0;
- }
-}
-
-static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
-{
- struct realtek_priv *priv = led->priv;
- u8 led_group = led->led_group;
- u8 port_num = led->port_num;
- int ret;
- u32 val;
-
- ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
- &val);
- if (ret) {
- dev_err(priv->dev, "error reading LED on port %d group %d\n",
- led_group, port_num);
- return ret;
- }
-
- return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
-}
-
-static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
-{
- struct realtek_priv *priv = led->priv;
- u8 led_group = led->led_group;
- u8 port_num = led->port_num;
- int ret;
-
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_X_X_CTRL_REG(led_group),
- rtl8366rb_led_group_port_mask(led_group,
- port_num),
- enable ? 0xffff : 0);
- if (ret) {
- dev_err(priv->dev, "error updating LED on port %d group %d\n",
- led_group, port_num);
- return ret;
- }
-
- /* Change the LED group to manual controlled LEDs if required */
- ret = rb8366rb_set_ledgroup_mode(priv, led_group,
- RTL8366RB_LEDGROUP_FORCE);
-
- if (ret) {
- dev_err(priv->dev, "error updating LED GROUP group %d\n",
- led_group);
- return ret;
- }
-
- return 0;
-}
-
-static int
-rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
- enum led_brightness brightness)
-{
- struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
- cdev);
-
- return rb8366rb_set_port_led(led, brightness == LED_ON);
-}
-
-static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
- struct fwnode_handle *led_fwnode)
-{
- struct rtl8366rb *rb = priv->chip_data;
- struct led_init_data init_data = { };
- enum led_default_state state;
- struct rtl8366rb_led *led;
- u32 led_group;
- int ret;
-
- ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
- if (ret)
- return ret;
-
- if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
- dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
- led_group, dp->index);
- return -EINVAL;
- }
-
- led = &rb->leds[dp->index][led_group];
- led->port_num = dp->index;
- led->led_group = led_group;
- led->priv = priv;
-
- state = led_init_default_state_get(led_fwnode);
- switch (state) {
- case LEDS_DEFSTATE_ON:
- led->cdev.brightness = 1;
- rb8366rb_set_port_led(led, 1);
- break;
- case LEDS_DEFSTATE_KEEP:
- led->cdev.brightness =
- rb8366rb_get_port_led(led);
- break;
- case LEDS_DEFSTATE_OFF:
- default:
- led->cdev.brightness = 0;
- rb8366rb_set_port_led(led, 0);
- }
-
- led->cdev.max_brightness = 1;
- led->cdev.brightness_set_blocking =
- rtl8366rb_cled_brightness_set_blocking;
- init_data.fwnode = led_fwnode;
- init_data.devname_mandatory = true;
-
- init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
- dp->ds->index, dp->index, led_group);
- if (!init_data.devicename)
- return -ENOMEM;
-
- ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
- if (ret) {
- dev_warn(priv->dev, "Failed to init LED %d for port %d",
- led_group, dp->index);
- return ret;
- }
-
- return 0;
-}
-
+/* This code is also used with LEDs disabled */
static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
{
int ret = 0;
@@ -1008,38 +794,6 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
return ret;
}
-static int rtl8366rb_setup_leds(struct realtek_priv *priv)
-{
- struct dsa_switch *ds = &priv->ds;
- struct device_node *leds_np;
- struct dsa_port *dp;
- int ret = 0;
-
- dsa_switch_for_each_port(dp, ds) {
- if (!dp->dn)
- continue;
-
- leds_np = of_get_child_by_name(dp->dn, "leds");
- if (!leds_np) {
- dev_dbg(priv->dev, "No leds defined for port %d",
- dp->index);
- continue;
- }
-
- for_each_child_of_node_scoped(leds_np, led_np) {
- ret = rtl8366rb_setup_led(priv, dp,
- of_fwnode_handle(led_np));
- if (ret)
- break;
- }
-
- of_node_put(leds_np);
- if (ret)
- return ret;
- }
- return 0;
-}
-
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h
new file mode 100644
index 000000000000..685ff3275faa
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL8366RB_H
+#define _RTL8366RB_H
+
+#include "realtek.h"
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* LED control registers */
+/* The LED blink rate is global; it is used by all triggers in all groups. */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
+/* LED trigger event for each group */
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+ (4 * (led_group))
+#define RTL8366RB_LED_CTRL_MASK(led_group) \
+ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+
+/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+ * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
+ */
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
+ ((led_group) <= 1 ? \
+ RTL8366RB_LED_0_1_CTRL_REG : \
+ RTL8366RB_LED_2_3_CTRL_REG)
+#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
+#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
+
+enum rtl8366_ledgroup_mode {
+ RTL8366RB_LEDGROUP_OFF = 0x0,
+ RTL8366RB_LEDGROUP_DUP_COL = 0x1,
+ RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
+ RTL8366RB_LEDGROUP_SPD1000 = 0x3,
+ RTL8366RB_LEDGROUP_SPD100 = 0x4,
+ RTL8366RB_LEDGROUP_SPD10 = 0x5,
+ RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
+ RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
+ RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
+ RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
+ RTL8366RB_LEDGROUP_FIBER = 0xa,
+ RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
+ RTL8366RB_LEDGROUP_LINK_RX = 0xc,
+ RTL8366RB_LEDGROUP_LINK_TX = 0xd,
+ RTL8366RB_LEDGROUP_MASTER = 0xe,
+ RTL8366RB_LEDGROUP_FORCE = 0xf,
+
+ __RTL8366RB_LEDGROUP_MODE_MAX
+};
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+
+struct rtl8366rb_led {
+ u8 port_num;
+ u8 led_group;
+ struct realtek_priv *priv;
+ struct led_classdev cdev;
+};
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv);
+
+#else
+
+static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
+
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
+ * @leds: per-port and per-ledgroup led info
+ */
+struct rtl8366rb {
+ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+ struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+#endif
+};
+
+/* This code is also used with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode);
+
+#endif /* _RTL8366RB_H */
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5740c98d8c9f..2847278d9cd4 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1279,6 +1279,8 @@ struct macb {
struct clk *rx_clk;
struct clk *tsu_clk;
struct net_device *dev;
+ /* Protects hw_stats and ethtool_stats */
+ spinlock_t stats_lock;
union {
struct macb_stats macb;
struct gem_stats gem;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 48496209fb16..c1f57d96e63f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1978,10 +1978,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
+ spin_lock(&bp->stats_lock);
if (macb_is_gem(bp))
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+ spin_unlock(&bp->stats_lock);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
@@ -3102,6 +3104,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
if (!netif_running(bp->dev))
return nstat;
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
@@ -3131,6 +3134,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
+ spin_unlock_irq(&bp->stats_lock);
return nstat;
}
@@ -3138,12 +3142,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
static void gem_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct macb *bp;
+ struct macb *bp = netdev_priv(dev);
- bp = netdev_priv(dev);
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
memcpy(data, &bp->ethtool_stats, sizeof(u64)
* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+ spin_unlock_irq(&bp->stats_lock);
}
static int gem_get_sset_count(struct net_device *dev, int sset)
@@ -3193,6 +3198,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return gem_get_stats(bp);
/* read stats from hardware */
+ spin_lock_irq(&bp->stats_lock);
macb_update_stats(bp);
/* Convert HW stats into netdevice stats */
@@ -3226,6 +3232,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
+ spin_unlock_irq(&bp->stats_lock);
return nstat;
}
@@ -5097,6 +5104,7 @@ static int macb_probe(struct platform_device *pdev)
}
}
spin_lock_init(&bp->lock);
+ spin_lock_init(&bp->stats_lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
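Note the two lock flavours introduced here: the writer in macb_interrupt() runs in hardirq context and takes plain spin_lock(), while the process-context readers take spin_lock_irq() so the overrun accounting cannot interrupt a read-modify-write of the same counters on the local CPU. The pairing in isolation:

	/* Writer (hardirq context): IRQs are already off, plain lock suffices. */
	spin_lock(&bp->stats_lock);
	bp->hw_stats.gem.rx_overruns++;
	spin_unlock(&bp->stats_lock);

	/* Reader (process context): must also block the handler on this CPU. */
	spin_lock_irq(&bp->stats_lock);
	gem_update_stats(bp);
	spin_unlock_irq(&bp->stats_lock);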
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 6a6fc819dfde..2106861463e4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -167,6 +167,24 @@ static bool enetc_skb_is_tcp(struct sk_buff *skb)
return skb->csum_offset == offsetof(struct tcphdr, check);
}
+/**
+ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
+ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
+ * @count: Number of Tx buffer descriptors which need to be unmapped
+ * @i: Index of the last successfully mapped Tx buffer descriptor
+ */
+static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
+{
+ while (count--) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ }
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
@@ -279,9 +297,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- u32 lo, hi, val;
- u64 sec, nsec;
+ __be32 new_sec_l, new_nsec;
+ u32 lo, hi, nsec, val;
+ __be16 new_sec_h;
u8 *data;
+ u64 sec;
lo = enetc_rd_hot(hw, ENETC_SICTR0);
hi = enetc_rd_hot(hw, ENETC_SICTR1);
@@ -295,13 +315,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
/* Update originTimestamp field of Sync packet
* - 48 bits seconds field
* - 32 bits nanoseconds field
+ *
+ * In addition, the UDP checksum needs to be updated
+ * by software after updating the originTimestamp field;
+ * otherwise the hardware will calculate the wrong
+ * checksum when it updates the correction field and
+ * writes it into the packet.
*/
data = skb_mac_header(skb);
- *(__be16 *)(data + offset2) =
- htons((sec >> 32) & 0xffff);
- *(__be32 *)(data + offset2 + 2) =
- htonl(sec & 0xffffffff);
- *(__be32 *)(data + offset2 + 6) = htonl(nsec);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (udp) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + offset2);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + offset2 + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + offset2 + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + offset2) = new_sec_h;
+ *(__be32 *)(data + offset2 + 2) = new_sec_l;
+ *(__be32 *)(data + offset2 + 6) = new_nsec;
/* Configure single-step register */
val = ENETC_PM0_SINGLE_STEP_EN;
@@ -372,25 +417,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_err:
dev_err(tx_ring->dev, "DMA map error");
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- struct enetc_tx_swbd *tx_swbd,
- union enetc_tx_bd *txbd, int *i, int hdr_len,
- int data_len)
+static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
{
union enetc_tx_bd txbd_tmp;
u8 flags = 0, e_flags = 0;
dma_addr_t addr;
+ int count = 1;
enetc_clear_tx_bd(&txbd_tmp);
addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
@@ -433,7 +473,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
/* Write the BD */
txbd_tmp.ext.e_flags = e_flags;
*txbd = txbd_tmp;
+ count++;
}
+
+ return count;
}
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
@@ -790,9 +833,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
/* compute the csum over the L4 header */
csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
- enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
+ &i, hdr_len, data_len);
bd_data_num = 0;
- count++;
while (data_len > 0) {
int size;
@@ -816,8 +859,13 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
tso.data, size,
size == data_len);
- if (err)
+ if (err) {
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+
goto err_map_data;
+ }
data_len -= size;
count++;
@@ -846,13 +894,7 @@ err_map_data:
dev_err(tx_ring->dev, "DMA map error");
err_chained_bd:
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
@@ -1901,7 +1943,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
} else {
- tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ tx_ring->stats.xdp_tx++;
rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
xdp_tx_frm_cnt++;
/* The XDP_TX enqueue was successful, so we
@@ -3228,6 +3270,9 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!enetc_si_is_pf(priv->si))
+ return -EOPNOTSUPP;
+
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
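inet_proto_csum_replace2/4() applies the RFC 1624 incremental update HC' = ~(~HC + ~m + m'), which is why the UDP checksum can be patched alongside the timestamp fields without resumming the payload. The core identity for one 16-bit word, as a standalone sketch:

	#include <stdint.h>

	/* RFC 1624: update checksum hc when word old_w is replaced by new_w. */
	static uint16_t csum_replace16(uint16_t hc, uint16_t old_w, uint16_t new_w)
	{
		uint32_t sum = (uint16_t)~hc + (uint16_t)~old_w + new_w;

		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}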
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index fc41078c4f5d..73ac8c6afb3a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -672,7 +672,6 @@ err_link_init:
err_alloc_msix:
err_config_si:
err_clk_get:
- mutex_destroy(&priv->mm_lock);
free_netdev(ndev);
return err;
@@ -684,6 +683,7 @@ static void enetc4_pf_netdev_destroy(struct enetc_si *si)
struct net_device *ndev = si->ndev;
unregister_netdev(ndev);
+ enetc4_link_deinit(priv);
enetc_free_msix(priv);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index bf34b5bb1e35..ece3ae28ba82 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -832,6 +832,7 @@ static int enetc_set_coalesce(struct net_device *ndev,
static int enetc_get_ts_info(struct net_device *ndev,
struct kernel_ethtool_ts_info *info)
{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
int *phc_idx;
phc_idx = symbol_get(enetc_phc_index);
@@ -852,8 +853,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON) |
- (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ (1 << HWTSTAMP_TX_ON);
+
+ if (enetc_si_is_pf(priv->si))
+ info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8ac0047f1ada..f0674a443567 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -109,10 +109,12 @@ static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_rx_ring *rx = &priv->rx[idx];
if (!gve_rx_was_added_to_block(priv, idx))
return;
+ page_pool_disable_direct_recycling(rx->dqo.page_pool);
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_dqo(priv, idx);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 852e5b62f0a5..6faa62bced3a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1983,7 +1983,7 @@ err:
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
- bool netdev_released = false;
+ bool locks_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
@@ -2012,19 +2012,22 @@ static void iavf_finish_config(struct work_struct *work)
netif_set_real_num_tx_queues(adapter->netdev, pairs);
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+ mutex_unlock(&adapter->crit_lock);
netdev_unlock(adapter->netdev);
- netdev_released = true;
+ locks_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);
/* go back and try again.*/
+ mutex_lock(&adapter->crit_lock);
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
+ mutex_unlock(&adapter->crit_lock);
goto out;
}
}
@@ -2040,9 +2043,10 @@ static void iavf_finish_config(struct work_struct *work)
}
out:
- mutex_unlock(&adapter->crit_lock);
- if (!netdev_released)
+ if (!locks_released) {
+ mutex_unlock(&adapter->crit_lock);
netdev_unlock(adapter->netdev);
+ }
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index fb527434b58b..d649c197cf67 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -38,8 +38,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_vlan_zero;
- if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
- ICE_FLTR_RX))
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index b83f99c01d91..8aabf7749aa5 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
hash_del_rcu(&vf->entry);
+ ice_deinitialize_vf_entry(vf);
ice_put_vf(vf);
}
}
@@ -193,10 +194,6 @@ void ice_free_vfs(struct ice_pf *pf)
wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
- /* clear malicious info since the VF is getting released */
- if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
- list_del(&vf->mbx_info.list_entry);
-
mutex_unlock(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index c7c0c2f50c26..815ad0bfe832 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -1036,6 +1036,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
mutex_init(&vf->cfg_lock);
}
+void ice_deinitialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ list_del(&vf->mbx_info.list_entry);
+}
+
/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a09f..5392b0404986 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -24,6 +24,7 @@
#endif
void ice_initialize_vf_entry(struct ice_vf *vf);
+void ice_deinitialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 9be6a6b59c4e..977741c41498 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3013,7 +3013,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
skb_shinfo(skb)->gso_size = rsc_seg_len;
skb_reset_network_header(skb);
- len = skb->len - skb_transport_offset(skb);
if (ipv4) {
struct iphdr *ipv4h = ip_hdr(skb);
@@ -3022,6 +3021,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
/* Reset and set transport header offset in skb */
skb_set_transport_header(skb, sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
/* Compute the TCP pseudo header checksum*/
tcp_hdr(skb)->check =
@@ -3031,6 +3031,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
tcp_hdr(skb)->check =
~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index 683c668672d6..cb07ecd8937d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -1122,7 +1122,7 @@ static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
* returns error (ENOENT), then no cage present. If no cage present then
* connection type is backplane or BASE-T.
*/
- return ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+ return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
}
/**
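ixgbe_aci_get_netlist_node() follows the usual kernel convention of returning 0 on success and -ENOENT when the node is absent, so "cage present" is the logical negation of its return value (ret == 0 -> true, ret == -ENOENT -> false); the unnegated form reported exactly the opposite media type.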
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 1641791a2d5b..8ed83fb98862 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
- MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_TAGGED,
0, 0),
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 8b7c843446e1..823c1ba456cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -564,6 +564,9 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_
return err;
esw_qos_normalize_min_rate(parent->esw, parent, extack);
+ trace_mlx5_esw_vport_qos_create(vport->dev, vport,
+ vport->qos.sched_node->max_rate,
+ vport->qos.sched_node->bw_share);
return 0;
}
@@ -591,8 +594,11 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
sched_node->vport = vport;
vport->qos.sched_node = sched_node;
err = esw_qos_vport_enable(vport, parent, extack);
- if (err)
+ if (err) {
+ __esw_qos_free_node(sched_node);
esw_qos_put(esw);
+ vport->qos.sched_node = NULL;
+ }
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 7db9cab9bedf..d9362eabc6a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -572,7 +572,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
- name, size, start);
+ name ? name : "mlx5_pcif_pool", size, start);
return pool;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index bfe6e2d631bd..f5acfb7d4ff6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -516,6 +516,19 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
return 0;
}
+/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
+static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 2000000);
+}
+
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
@@ -566,6 +579,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->bsp_priv = ld;
plat->setup = loongson_dwmac_setup;
+ plat->fix_soc_reset = loongson_dwmac_fix_reset;
ld->dev = &pdev->dev;
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 0d5a862cd78a..3a13d60a947a 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
select NET_DEVLINK
select TI_DAVINCI_MDIO
select PHYLINK
+ select PAGE_POOL
select TI_K3_CPPI_DESC_POOL
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 768578c0d958..d59c1744840a 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -474,26 +474,7 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
static int icss_iep_perout_enable(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
- int ret = 0;
-
- mutex_lock(&iep->ptp_clk_mutex);
-
- if (iep->pps_enabled) {
- ret = -EBUSY;
- goto exit;
- }
-
- if (iep->perout_enabled == !!on)
- goto exit;
-
- ret = icss_iep_perout_enable_hw(iep, req, on);
- if (!ret)
- iep->perout_enabled = !!on;
-
-exit:
- mutex_unlock(&iep->ptp_clk_mutex);
-
- return ret;
+ return -EOPNOTSUPP;
}
static void icss_iep_cap_cmp_work(struct work_struct *work)
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index fd591ddb3884..ca62188a317a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -416,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
- const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
- struct rtable *rt;
int err, ret = NET_XMIT_DROP;
+ const struct iphdr *ip4h;
+ struct rtable *rt;
struct flowi4 fl4 = {
.flowi4_oif = dev->ifindex,
- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
- .daddr = ip4h->daddr,
- .saddr = ip4h->saddr,
};
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ goto err;
+
+ ip4h = ip_hdr(skb);
+ fl4.daddr = ip4h->daddr;
+ fl4.saddr = ip4h->saddr;
+ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;
@@ -488,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
struct net_device *dev = skb->dev;
int err, ret = NET_XMIT_DROP;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
+ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return ret;
+ }
+
err = ipvlan_route_v6_outbound(dev, skb);
if (unlikely(err)) {
DEV_STATS_INC(dev, tx_errors);
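
Both ipvlan hunks apply the same rule: never dereference ip_hdr() or ipv6_hdr() before pskb_network_may_pull() has confirmed the header bytes are present in linear data. A condensed sketch of the v4 shape, assuming an skb whose network header offset is already set:

    #include <linux/ip.h>
    #include <linux/skbuff.h>

    static __be32 v4_daddr_sketch(struct sk_buff *skb)
    {
            /* Fail closed if the packet cannot hold a full header. */
            if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
                    return 0;

            return ip_hdr(skb)->daddr; /* safe: bytes are linear now */
    }
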
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c8840c3b9a1b..f1d68153987e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -244,8 +244,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+ return 0;
+}
+
+static int blackhole_neigh_construct(struct net_device *dev,
+ struct neighbour *n)
+{
+ n->output = blackhole_neigh_output;
+ return 0;
+}
+
static const struct net_device_ops blackhole_netdev_ops = {
.ndo_start_xmit = blackhole_netdev_xmit,
+ .ndo_neigh_construct = blackhole_neigh_construct,
};
/* This is a dst-dummy device used specifically for invalidated
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 5c80fbee7913..7ab358616e03 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -184,9 +184,11 @@ static const struct ethtool_ops nsim_ethtool_ops = {
static void nsim_ethtool_ring_init(struct netdevsim *ns)
{
+ ns->ethtool.ring.rx_pending = 512;
ns->ethtool.ring.rx_max_pending = 4096;
ns->ethtool.ring.rx_jumbo_max_pending = 4096;
ns->ethtool.ring.rx_mini_max_pending = 4096;
+ ns->ethtool.ring.tx_pending = 512;
ns->ethtool.ring.tx_max_pending = 4096;
}
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 3279de857b47..2ad8c2586d64 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -774,7 +774,7 @@ static int qca807x_config_init(struct phy_device *phydev)
control_dac &= ~QCA807X_CONTROL_DAC_MASK;
if (!priv->dac_full_amplitude)
control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
- if (!priv->dac_full_amplitude)
+ if (!priv->dac_full_bias_current)
control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
if (!priv->dac_disable_bias_current_tweak)
control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 46af78caf457..0bfa37c14059 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
{
dev->hard_mtu = GL_RCV_BUF_SIZE;
dev->net->hard_header_len += 4;
- dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
- dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
- return 0;
+ return usbnet_get_endpoints(dev, intf);
}
static const struct driver_info genelink_info = {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index be0890e4e706..f1cfe0bb89b2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1669,13 +1669,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
if (in_flight)
__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- /*
- * Only clear the driver-private command data if the LLD does not supply
- * a function to initialize that data.
- */
- if (!shost->hostt->init_cmd_priv)
- memset(cmd + 1, 0, shost->hostt->cmd_size);
-
cmd->prot_op = SCSI_PROT_NORMAL;
if (blk_rq_bytes(req))
cmd->sc_data_direction = rq_dma_dir(req);
@@ -1842,6 +1835,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!scsi_host_queue_ready(q, shost, sdev, cmd))
goto out_dec_target_busy;
+ /*
+ * Only clear the driver-private command data if the LLD does not supply
+ * a function to initialize that data.
+ */
+ if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
+ memset(cmd + 1, 0, shost->hostt->cmd_size);
+
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = scsi_prepare_cmd(req);
if (ret != BLK_STS_OK)
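
The relocated memset relies on the long-standing SCSI convention that LLD-private data is allocated immediately after struct scsi_cmnd, which is why `cmd + 1` addresses it (the in-tree scsi_cmd_priv() helper does the same arithmetic). A generic, userspace sketch of the trailing-payload pattern, with struct cmd and priv_size as hypothetical stand-ins:

    #include <stdlib.h>
    #include <string.h>

    struct cmd {
            int core_state;
            /* priv_size bytes of driver-private data follow */
    };

    static void *cmd_priv(struct cmd *c)
    {
            return c + 1; /* first byte past the core struct */
    }

    static struct cmd *cmd_alloc(size_t priv_size)
    {
            struct cmd *c = malloc(sizeof(*c) + priv_size);

            if (c)
                    memset(cmd_priv(c), 0, priv_size); /* clear private area only */
            return c;
    }
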
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 3b644de3292e..0d9f636c80f4 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -370,7 +370,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors,
for (i = 0; i < num_actors; i++) {
struct power_actor *pa = &power[i];
- u64 req_range = (u64)pa->req_power * power_range;
+ u64 req_range = (u64)pa->weighted_req_power * power_range;
pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range,
total_req_power);
@@ -641,6 +641,22 @@ clean_state:
return ret;
}
+static void power_allocator_update_weight(struct power_allocator_params *params)
+{
+ const struct thermal_trip_desc *td;
+ struct thermal_instance *instance;
+
+ if (!params->trip_max)
+ return;
+
+ td = trip_to_trip_desc(params->trip_max);
+
+ params->total_weight = 0;
+ list_for_each_entry(instance, &td->thermal_instances, trip_node)
+ if (power_actor_is_valid(instance))
+ params->total_weight += instance->weight;
+}
+
static void power_allocator_update_tz(struct thermal_zone_device *tz,
enum thermal_notify_event reason)
{
@@ -656,16 +672,12 @@ static void power_allocator_update_tz(struct thermal_zone_device *tz,
if (power_actor_is_valid(instance))
num_actors++;
- if (num_actors == params->num_actors)
- return;
+ if (num_actors != params->num_actors)
+ allocate_actors_buffer(params, num_actors);
- allocate_actors_buffer(params, num_actors);
- break;
+ fallthrough;
case THERMAL_INSTANCE_WEIGHT_CHANGED:
- params->total_weight = 0;
- list_for_each_entry(instance, &td->thermal_instances, trip_node)
- if (power_actor_is_valid(instance))
- params->total_weight += instance->weight;
+ power_allocator_update_weight(params);
break;
default:
break;
@@ -731,6 +743,8 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
tz->governor_data = params;
+ power_allocator_update_weight(params);
+
return 0;
free_params:
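
The governor rework makes "instance count changed" deliberately continue into "weight changed", since resizing the actor buffer always invalidates the cached total weight; the kernel's fallthrough keyword documents that intent. A schematic sketch under hypothetical event names:

    enum gov_event { ACTORS_CHANGED, WEIGHT_CHANGED, OTHER };

    /* Hypothetical handler: a resize always invalidates the cached
     * weights, so the first case intentionally falls through. */
    static void handle_event(enum gov_event event, int num_actors,
                             int *cached_actors)
    {
            switch (event) {
            case ACTORS_CHANGED:
                    if (num_actors != *cached_actors)
                            *cached_actors = num_actors; /* resize here */
                    fallthrough;
            case WEIGHT_CHANGED:
                    /* recompute the total weight here */
                    break;
            default:
                    break;
            }
    }
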
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 5ab4ce4daaeb..5401f03d6b6c 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -274,6 +274,34 @@ static bool thermal_of_get_cooling_spec(struct device_node *map_np, int index,
return true;
}
+static bool thermal_of_cm_lookup(struct device_node *cm_np,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
+{
+ for_each_child_of_node_scoped(cm_np, child) {
+ struct device_node *tr_np;
+ int count, i;
+
+ tr_np = of_parse_phandle(child, "trip", 0);
+ if (tr_np != trip->priv)
+ continue;
+
+ /* The trip has been found, look up the cdev. */
+ count = of_count_phandle_with_args(child, "cooling-device",
+ "#cooling-cells");
+ if (count <= 0)
+ pr_err("Add a cooling_device property with at least one device\n");
+
+ for (i = 0; i < count; i++) {
+ if (thermal_of_get_cooling_spec(child, i, cdev, c))
+ return true;
+ }
+ }
+
+ return false;
+}
+
static bool thermal_of_should_bind(struct thermal_zone_device *tz,
const struct thermal_trip *trip,
struct thermal_cooling_device *cdev,
@@ -293,27 +321,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz,
goto out;
/* Look up the trip and the cdev in the cooling maps. */
- for_each_child_of_node_scoped(cm_np, child) {
- struct device_node *tr_np;
- int count, i;
-
- tr_np = of_parse_phandle(child, "trip", 0);
- if (tr_np != trip->priv)
- continue;
-
- /* The trip has been found, look up the cdev. */
- count = of_count_phandle_with_args(child, "cooling-device", "#cooling-cells");
- if (count <= 0)
- pr_err("Add a cooling_device property with at least one device\n");
-
- for (i = 0; i < count; i++) {
- result = thermal_of_get_cooling_spec(child, i, cdev, c);
- if (result)
- break;
- }
-
- break;
- }
+ result = thermal_of_cm_lookup(cm_np, trip, cdev, c);
of_node_put(cm_np);
out:
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 8d4ad0a3f2cf..252186124669 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -194,10 +194,12 @@ out:
ufshcd_rpm_put_sync(hba);
kfree(buff);
bsg_reply->result = ret;
- job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
- if (ret == 0)
+ if (ret == 0) {
+ job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) :
+ sizeof(struct ufs_bsg_reply);
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+ }
return ret;
}
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 1893a7ad9531..464f13da259a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -266,7 +266,7 @@ static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
- return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
+ return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -628,8 +628,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
+ scsi_host_busy(hba->host), hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -8882,7 +8882,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
__func__, hba->outstanding_tasks);
- return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+ return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -10431,6 +10431,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
spin_lock_init(&hba->clk_gating.lock);
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Host controller drivers can override them in their
+ * 'ufs_hba_variant_ops::init' callback.
+ *
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
@@ -10544,21 +10559,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_disable;
}
- /*
- * Set the default power management level for runtime and system PM if
- * not set by the host controller drivers.
- * Default power saving mode is to keep UFS link in Hibern8 state
- * and UFS device in sleep state.
- */
- if (!hba->rpm_lvl)
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
- if (!hba->spm_lvl)
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
-
INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9ac25d08f473..63612faeab72 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -666,7 +666,7 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
worker, name);
- if (!vtsk)
+ if (IS_ERR(vtsk))
goto free_worker;
mutex_init(&worker->mutex);
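
vhost_task_create() reports failure through ERR_PTR(), so the old NULL test let error pointers escape and be dereferenced later. A minimal sketch of the ERR_PTR convention, with make_thing() as a hypothetical constructor that never returns NULL:

    #include <linux/err.h>

    struct thing;
    struct thing *make_thing(void); /* hypothetical: errors come back as ERR_PTR(-errno) */

    static long use_thing(void)
    {
            struct thing *t = make_thing();

            /* A NULL check here would treat every failure as success. */
            if (IS_ERR(t))
                    return PTR_ERR(t);
            return 0;
    }
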
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 038f9d0ae3af..4504e16b458c 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -163,6 +163,8 @@ static struct afs_server *afs_install_server(struct afs_cell *cell,
rb_insert_color(&server->uuid_rb, &net->fs_servers);
hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+ afs_get_cell(cell, afs_cell_trace_get_server);
+
added_dup:
write_seqlock(&net->fs_addr_lock);
estate = rcu_dereference_protected(server->endpoint_state,
@@ -442,6 +444,7 @@ static void afs_server_rcu(struct rcu_head *rcu)
atomic_read(&server->active), afs_server_trace_free);
afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state),
afs_estate_trace_put_server);
+ afs_put_cell(server->cell, afs_cell_trace_put_server);
kfree(server);
}
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 7e7e567a7f8a..d20cd902ef94 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
break;
if (j < slist->nr_servers) {
if (slist->servers[j].server == server) {
- afs_put_server(volume->cell->net, server,
- afs_server_trace_put_slist_isort);
+ afs_unuse_server(volume->cell->net, server,
+ afs_server_trace_put_slist_isort);
continue;
}
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index ca755e8d1a37..1ec1f90e0eb3 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -203,7 +203,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
return NULL;
}
- bch2_btree_lock_init(&b->c, 0);
+ bch2_btree_lock_init(&b->c, 0, GFP_KERNEL);
__bch2_btree_node_to_freelist(bc, b);
return b;
@@ -795,17 +795,18 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
}
b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
- if (!b) {
+ if (b) {
+ bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_NOWAIT);
+ } else {
mutex_unlock(&bc->lock);
bch2_trans_unlock(trans);
b = __btree_node_mem_alloc(c, GFP_KERNEL);
if (!b)
goto err;
+ bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL);
mutex_lock(&bc->lock);
}
- bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
-
BUG_ON(!six_trylock_intent(&b->c.lock));
BUG_ON(!six_trylock_write(&b->c.lock));
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index e371e60e3133..dece27d9db04 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -996,7 +996,7 @@ drop_this_key:
}
got_good_key:
le16_add_cpu(&i->u64s, -next_good_key);
- memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k);
+ memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
set_btree_node_need_rewrite(b);
}
fsck_err:
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 1821f40c161a..edce59433375 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -156,7 +156,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned k
}
if (ck) {
- bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
+ bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL);
ck->c.cached = true;
goto lock;
}
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 10b805a60f52..caef65adeae4 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -7,9 +7,10 @@
static struct lock_class_key bch2_btree_node_lock_key;
void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
- enum six_lock_init_flags flags)
+ enum six_lock_init_flags flags,
+ gfp_t gfp)
{
- __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
+ __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags, gfp);
lockdep_set_notrack_class(&b->lock);
}
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index b54ef48eb8cc..b33ab7af8440 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -13,7 +13,7 @@
#include "btree_iter.h"
#include "six.h"
-void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
+void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags, gfp_t gfp);
void bch2_trans_unlock_noassert(struct btree_trans *);
void bch2_trans_unlock_write(struct btree_trans *);
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 337494facac6..642fbc60ecab 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -340,6 +340,7 @@ restart_drop_extra_replicas:
struct printbuf buf = PRINTBUF;
prt_str(&buf, "about to insert invalid key in data update path");
+ prt_printf(&buf, "\nop.nonce: %u", m->op.nonce);
prt_str(&buf, "\nold: ");
bch2_bkey_val_to_text(&buf, c, old);
prt_str(&buf, "\nk: ");
diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h
index a633f83c1ac7..362b3b2f2f2e 100644
--- a/fs/bcachefs/dirent.h
+++ b/fs/bcachefs/dirent.h
@@ -31,11 +31,6 @@ static inline unsigned dirent_val_u64s(unsigned len)
sizeof(u64));
}
-static inline unsigned int dirent_occupied_size(const struct qstr *name)
-{
- return (BKEY_U64s + dirent_val_u64s(name->len)) * sizeof(u64);
-}
-
int bch2_dirent_read_target(struct btree_trans *, subvol_inum,
struct bkey_s_c_dirent, subvol_inum *);
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 620b284aa34f..204d765dd74c 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -704,7 +704,7 @@ static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1,
ptr1.unwritten == ptr2.unwritten &&
ptr1.offset == ptr2.offset &&
ptr1.dev == ptr2.dev &&
- ptr1.dev == ptr2.dev);
+ ptr1.gen == ptr2.gen);
}
void bch2_ptr_swab(struct bkey_s);
diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/fs-common.c
index d70d9f634cea..2c3d46ac70c6 100644
--- a/fs/bcachefs/fs-common.c
+++ b/fs/bcachefs/fs-common.c
@@ -152,7 +152,6 @@ int bch2_create_trans(struct btree_trans *trans,
if (is_subdir_for_nlink(new_inode))
dir_u->bi_nlink++;
dir_u->bi_mtime = dir_u->bi_ctime = now;
- dir_u->bi_size += dirent_occupied_size(name);
ret = bch2_inode_write(trans, &dir_iter, dir_u);
if (ret)
@@ -221,7 +220,6 @@ int bch2_link_trans(struct btree_trans *trans,
}
dir_u->bi_mtime = dir_u->bi_ctime = now;
- dir_u->bi_size += dirent_occupied_size(name);
dir_hash = bch2_hash_info_init(c, dir_u);
@@ -324,7 +322,6 @@ int bch2_unlink_trans(struct btree_trans *trans,
dir_u->bi_mtime = dir_u->bi_ctime = inode_u->bi_ctime = now;
dir_u->bi_nlink -= is_subdir_for_nlink(inode_u);
- dir_u->bi_size -= dirent_occupied_size(name);
ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash, &dirent_iter,
@@ -463,14 +460,6 @@ int bch2_rename_trans(struct btree_trans *trans,
goto err;
}
- if (mode == BCH_RENAME) {
- src_dir_u->bi_size -= dirent_occupied_size(src_name);
- dst_dir_u->bi_size += dirent_occupied_size(dst_name);
- }
-
- if (mode == BCH_RENAME_OVERWRITE)
- src_dir_u->bi_size -= dirent_occupied_size(src_name);
-
if (src_inode_u->bi_parent_subvol)
src_inode_u->bi_parent_subvol = dst_dir.subvol;
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 94bf34b9b65f..717e7b94c66f 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -466,6 +466,7 @@ int bchfs_truncate(struct mnt_idmap *idmap,
ret = bch2_truncate_folio(inode, iattr->ia_size);
if (unlikely(ret < 0))
goto err;
+ ret = 0;
truncate_setsize(&inode->v, iattr->ia_size);
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 9bf316e7b845..0e85131d0af8 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -1978,31 +1978,10 @@ fsck_err:
return ret;
}
-static int check_dir_i_size_notnested(struct btree_trans *trans, struct inode_walker *w)
-{
- struct bch_fs *c = trans->c;
- int ret = 0;
-
- darray_for_each(w->inodes, i)
- if (fsck_err_on(i->inode.bi_size != i->i_size,
- trans, inode_dir_wrong_nlink,
- "directory %llu:%u with wrong i_size: got %llu, should be %llu",
- w->last_pos.inode, i->snapshot, i->inode.bi_size, i->i_size)) {
- i->inode.bi_size = i->i_size;
- ret = bch2_fsck_write_inode(trans, &i->inode);
- if (ret)
- break;
- }
-fsck_err:
- bch_err_fn(c, ret);
- return ret;
-}
-
static int check_subdir_dirents_count(struct btree_trans *trans, struct inode_walker *w)
{
u32 restart_count = trans->restart_count;
return check_subdir_count_notnested(trans, w) ?:
- check_dir_i_size_notnested(trans, w) ?:
trans_was_restarted(trans, restart_count);
}
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 24c294d4634e..5dabbf3c0965 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1194,7 +1194,9 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
closure_sync(&cl);
- if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
+ if (ret &&
+ ret != -BCH_ERR_bucket_alloc_blocked &&
+ ret != -BCH_ERR_open_buckets_empty)
break;
}
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index 35e07bc8fbd3..051214fdc735 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -90,10 +90,7 @@
BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
BCH_FSCK_ERR_accounting_mismatch, \
BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \
- BCH_FSCK_ERR_accounting_key_junk_at_end) \
- x(directory_size, \
- BIT_ULL(BCH_RECOVERY_PASS_check_dirents), \
- BCH_FSCK_ERR_directory_size_mismatch) \
+ BCH_FSCK_ERR_accounting_key_junk_at_end)
#define DOWNGRADE_TABLE() \
x(bucket_stripe_sectors, \
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 7e7c66a1e1a6..7c403427fbdb 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -850,7 +850,8 @@ void six_lock_exit(struct six_lock *lock)
EXPORT_SYMBOL_GPL(six_lock_exit);
void __six_lock_init(struct six_lock *lock, const char *name,
- struct lock_class_key *key, enum six_lock_init_flags flags)
+ struct lock_class_key *key, enum six_lock_init_flags flags,
+ gfp_t gfp)
{
atomic_set(&lock->state, 0);
raw_spin_lock_init(&lock->wait_lock);
@@ -873,7 +874,7 @@ void __six_lock_init(struct six_lock *lock, const char *name,
* failure if they wish by checking lock->readers, but generally
* will not want to treat it as an error.
*/
- lock->readers = alloc_percpu(unsigned);
+ lock->readers = alloc_percpu_gfp(unsigned, gfp);
}
#endif
}
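
Plumbing a gfp_t through __six_lock_init() lets bcachefs initialize node locks from paths that must not sleep with GFP_NOWAIT (see the btree_cache.c hunk above), while ordinary paths keep GFP_KERNEL. Usage shape under the new signature, mirroring the callers in this series:

    /* Fast path that must not sleep: the percpu reader allocation may
     * fail, which the six-lock code tolerates. */
    bch2_btree_lock_init(&b->c, SIX_LOCK_INIT_PCPU, GFP_NOWAIT);

    /* Slow path after dropping locks: sleeping allocation is fine. */
    bch2_btree_lock_init(&b->c, SIX_LOCK_INIT_PCPU, GFP_KERNEL);
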
diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h
index c142e06b7a3a..59b851cf8bac 100644
--- a/fs/bcachefs/six.h
+++ b/fs/bcachefs/six.h
@@ -164,18 +164,19 @@ enum six_lock_init_flags {
};
void __six_lock_init(struct six_lock *lock, const char *name,
- struct lock_class_key *key, enum six_lock_init_flags flags);
+ struct lock_class_key *key, enum six_lock_init_flags flags,
+ gfp_t gfp);
/**
* six_lock_init - initialize a six lock
* @lock: lock to initialize
* @flags: optional flags, i.e. SIX_LOCK_INIT_PCPU
*/
-#define six_lock_init(lock, flags) \
+#define six_lock_init(lock, flags, gfp) \
do { \
static struct lock_class_key __key; \
\
- __six_lock_init((lock), #lock, &__key, flags); \
+ __six_lock_init((lock), #lock, &__key, flags, gfp); \
} while (0)
/**
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 67ce85ff0ae2..7f46abbd6311 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1128,6 +1128,8 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
long nr_dropped = 0;
struct rb_node *node;
+ lockdep_assert_held_write(&tree->lock);
+
/*
* Take the mmap lock so that we serialize with the inode logging phase
* of fsync because we may need to set the full sync flag on the inode,
@@ -1139,28 +1141,12 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
* to find new extents, which may not be there yet because ordered
* extents haven't completed yet.
*
- * We also do a try lock because otherwise we could deadlock. This is
- * because the shrinker for this filesystem may be invoked while we are
- * in a path that is holding the mmap lock in write mode. For example in
- * a reflink operation while COWing an extent buffer, when allocating
- * pages for a new extent buffer and under memory pressure, the shrinker
- * may be invoked, and therefore we would deadlock by attempting to read
- * lock the mmap lock while we are holding already a write lock on it.
+ * We also do a try lock because we don't want to block for too long and
+ * we are holding the extent map tree's lock in write mode.
*/
if (!down_read_trylock(&inode->i_mmap_lock))
return 0;
- /*
- * We want to be fast so if the lock is busy we don't want to spend time
- * waiting for it - either some task is about to do IO for the inode or
- * we may have another task shrinking extent maps, here in this code, so
- * skip this inode.
- */
- if (!write_trylock(&tree->lock)) {
- up_read(&inode->i_mmap_lock);
- return 0;
- }
-
node = rb_first(&tree->root);
while (node) {
struct rb_node *next = rb_next(node);
@@ -1201,12 +1187,61 @@ next:
break;
node = next;
}
- write_unlock(&tree->lock);
up_read(&inode->i_mmap_lock);
return nr_dropped;
}
+static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root,
+ u64 min_ino)
+{
+ struct btrfs_inode *inode;
+ unsigned long from = min_ino;
+
+ xa_lock(&root->inodes);
+ while (true) {
+ struct extent_map_tree *tree;
+
+ inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
+ if (!inode)
+ break;
+
+ tree = &inode->extent_tree;
+
+ /*
+ * We want to be fast so if the lock is busy we don't want to
+ * spend time waiting for it (some task is about to do IO for
+ * the inode).
+ */
+ if (!write_trylock(&tree->lock))
+ goto next;
+
+ /*
+ * Skip inode if it doesn't have loaded extent maps, so we avoid
+ * getting a reference and doing an iput later. This includes
+ * cases like files that were opened for things like stat(2), or
+ * files with all extent maps previously released through the
+ * release folio callback (btrfs_release_folio()) or released in
+ * a previous run, or directories which never have extent maps.
+ */
+ if (RB_EMPTY_ROOT(&tree->root)) {
+ write_unlock(&tree->lock);
+ goto next;
+ }
+
+ if (igrab(&inode->vfs_inode))
+ break;
+
+ write_unlock(&tree->lock);
+next:
+ from = btrfs_ino(inode) + 1;
+ cond_resched_lock(&root->inodes.xa_lock);
+ }
+ xa_unlock(&root->inodes);
+
+ return inode;
+}
+
static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1214,21 +1249,21 @@ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx
long nr_dropped = 0;
u64 min_ino = fs_info->em_shrinker_last_ino + 1;
- inode = btrfs_find_first_inode(root, min_ino);
+ inode = find_first_inode_to_shrink(root, min_ino);
while (inode) {
nr_dropped += btrfs_scan_inode(inode, ctx);
+ write_unlock(&inode->extent_tree.lock);
min_ino = btrfs_ino(inode) + 1;
fs_info->em_shrinker_last_ino = btrfs_ino(inode);
- btrfs_add_delayed_iput(inode);
+ iput(&inode->vfs_inode);
- if (ctx->scanned >= ctx->nr_to_scan ||
- btrfs_fs_closing(inode->root->fs_info))
+ if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(fs_info))
break;
cond_resched();
- inode = btrfs_find_first_inode(root, min_ino);
+ inode = find_first_inode_to_shrink(root, min_ino);
}
if (inode) {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index ed3c0d6546c5..0b568c8d24cb 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1090,7 +1090,7 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
u64 lockend;
size_t num_written = 0;
ssize_t ret;
- loff_t old_isize = i_size_read(inode);
+ loff_t old_isize;
unsigned int ilock_flags = 0;
const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
@@ -1103,6 +1103,13 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
if (ret < 0)
return ret;
+ /*
+ * We can only trust the isize with inode lock held, or it can race with
+ * other buffered writes and cause incorrect call of
+ * pagecache_isize_extended() to overwrite existing data.
+ */
+ old_isize = i_size_read(inode);
+
ret = generic_write_checks(iocb, i);
if (ret <= 0)
goto out;
diff --git a/fs/btrfs/tests/delayed-refs-tests.c b/fs/btrfs/tests/delayed-refs-tests.c
index 6558508c2ddf..265370e79a54 100644
--- a/fs/btrfs/tests/delayed-refs-tests.c
+++ b/fs/btrfs/tests/delayed-refs-tests.c
@@ -1009,6 +1009,7 @@ int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize)
if (!ret)
ret = select_delayed_refs_test(&trans);
+ kfree(transaction);
out_free_fs_info:
btrfs_free_dummy_fs_info(fs_info);
return ret;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0a0776489055..fb22d4425cb0 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -7200,8 +7200,12 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
fs_devices = find_fsid(fsid, NULL);
if (!fs_devices) {
- if (!btrfs_test_opt(fs_info, DEGRADED))
+ if (!btrfs_test_opt(fs_info, DEGRADED)) {
+ btrfs_err(fs_info,
+ "failed to find fsid %pU when attempting to open seed devices",
+ fsid);
return ERR_PTR(-ENOENT);
+ }
fs_devices = alloc_fs_devices(fsid);
if (IS_ERR(fs_devices))
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index cb1b6d0c3454..c294a8fc566d 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -57,10 +57,11 @@ static ssize_t efivarfs_file_write(struct file *file,
if (bytes == -ENOENT) {
/*
- * zero size signals to release that the write deleted
- * the variable
+ * FIXME: temporary workaround for fwupdate, signal
+ * failed write with a 1 to keep created but not
+ * written files
*/
- i_size_write(inode, 0);
+ i_size_write(inode, 1);
} else {
i_size_write(inode, datasize + sizeof(attributes));
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
@@ -124,7 +125,8 @@ static int efivarfs_file_release(struct inode *inode, struct file *file)
struct efivar_entry *var = inode->i_private;
inode_lock(inode);
- var->removed = (--var->open_count == 0 && i_size_read(inode) == 0);
+ /* FIXME: temporary workaround for fwupdate */
+ var->removed = (--var->open_count == 0 && i_size_read(inode) == 1);
inode_unlock(inode);
if (var->removed)
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 09fcf731e65d..6eae8cf655c1 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -367,6 +367,8 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
+ register_pm_notifier(&sfi->pm_nb);
+
return efivar_init(efivarfs_callback, sb, true);
}
@@ -552,7 +554,6 @@ static int efivarfs_init_fs_context(struct fs_context *fc)
sfi->pm_nb.notifier_call = efivarfs_pm_notify;
sfi->pm_nb.priority = 0;
- register_pm_notifier(&sfi->pm_nb);
return 0;
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5b5f789b37eb..2b2d1b755544 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -838,6 +838,12 @@ static int fuse_check_folio(struct folio *folio)
return 0;
}
+/*
+ * Attempt to steal a page from the splice() pipe and move it into the
+ * pagecache. If successful, the pointer in @pagep will be updated. The
+ * folio that was originally in @pagep will lose a reference and the new
+ * folio returned in @pagep will carry a reference.
+ */
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
int err;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 198862b086ff..3805f9b06c9d 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1636,7 +1636,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
goto out_err;
if (fc->cache_symlinks)
- return page_get_link(dentry, inode, callback);
+ return page_get_link_raw(dentry, inode, callback);
err = -ECHILD;
if (!dentry)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 7d92a5479998..d63e56fd3dd2 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -955,8 +955,10 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
fuse_invalidate_atime(inode);
}
- for (i = 0; i < ap->num_folios; i++)
+ for (i = 0; i < ap->num_folios; i++) {
folio_end_read(ap->folios[i], !err);
+ folio_put(ap->folios[i]);
+ }
if (ia->ff)
fuse_file_put(ia->ff, false);
@@ -1048,7 +1050,14 @@ static void fuse_readahead(struct readahead_control *rac)
ap = &ia->ap;
while (ap->num_folios < cur_pages) {
- folio = readahead_folio(rac);
+ /*
+ * This returns a folio with a ref held on it.
+ * The ref needs to be held until the request is
+ * completed, since the splice case (see
+ * fuse_try_move_page()) drops the ref after it's
+ * replaced in the page cache.
+ */
+ folio = __readahead_folio(rac);
ap->folios[ap->num_folios] = folio;
ap->descs[ap->num_folios].length = folio_size(folio);
ap->num_folios++;
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index b521eb15759e..0e47da82b0c2 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -427,12 +427,10 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
bio_put(bio);
goto zero_tail;
}
- if (dio->flags & IOMAP_DIO_WRITE) {
+ if (dio->flags & IOMAP_DIO_WRITE)
task_io_account_write(n);
- } else {
- if (dio->flags & IOMAP_DIO_DIRTY)
- bio_set_pages_dirty(bio);
- }
+ else if (dio->flags & IOMAP_DIO_DIRTY)
+ bio_set_pages_dirty(bio);
dio->size += n;
copied += n;
diff --git a/fs/namei.c b/fs/namei.c
index 3ab9440c5b93..ecb7b95c2ca3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -5356,10 +5356,9 @@ const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
EXPORT_SYMBOL(vfs_get_link);
/* get the link contents into pagecache */
-const char *page_get_link(struct dentry *dentry, struct inode *inode,
- struct delayed_call *callback)
+static char *__page_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
{
- char *kaddr;
struct page *page;
struct address_space *mapping = inode->i_mapping;
@@ -5378,8 +5377,23 @@ const char *page_get_link(struct dentry *dentry, struct inode *inode,
}
set_delayed_call(callback, page_put_link, page);
BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
- kaddr = page_address(page);
- nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
+ return page_address(page);
+}
+
+const char *page_get_link_raw(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ return __page_get_link(dentry, inode, callback);
+}
+EXPORT_SYMBOL_GPL(page_get_link_raw);
+
+const char *page_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ char *kaddr = __page_get_link(dentry, inode, callback);
+
+ if (!IS_ERR(kaddr))
+ nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
return kaddr;
}
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 035ba52742a5..4db912f56230 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -781,6 +781,43 @@ int nfs4_inode_return_delegation(struct inode *inode)
}
/**
+ * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation
+ * @inode: inode to process
+ *
+ * This routine is called to request that the delegation be returned as soon
+ * as the file is closed. If the file is already closed, the delegation is
+ * immediately returned.
+ */
+void nfs4_inode_set_return_delegation_on_close(struct inode *inode)
+{
+ struct nfs_delegation *delegation;
+ struct nfs_delegation *ret = NULL;
+
+ if (!inode)
+ return;
+ rcu_read_lock();
+ delegation = nfs4_get_valid_delegation(inode);
+ if (!delegation)
+ goto out;
+ spin_lock(&delegation->lock);
+ if (!delegation->inode)
+ goto out_unlock;
+ if (list_empty(&NFS_I(inode)->open_files) &&
+ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ /* Refcount matched in nfs_end_delegation_return() */
+ ret = nfs_get_delegation(delegation);
+ } else
+ set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+out_unlock:
+ spin_unlock(&delegation->lock);
+ if (ret)
+ nfs_clear_verifier_delegated(inode);
+out:
+ rcu_read_unlock();
+ nfs_end_delegation_return(inode, ret, 0);
+}
+
+/**
* nfs4_inode_return_delegation_on_close - asynchronously return a delegation
* @inode: inode to process
*
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 71524d34ed20..8ff5ab9c5c25 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -49,6 +49,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
unsigned long pagemod_limit, u32 deleg_type);
int nfs4_inode_return_delegation(struct inode *inode);
void nfs4_inode_return_delegation_on_close(struct inode *inode);
+void nfs4_inode_set_return_delegation_on_close(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
void nfs_inode_evict_delegation(struct inode *inode);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index f45beea92d03..f32f8d7c9122 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -56,6 +56,7 @@
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
@@ -130,6 +131,20 @@ static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
dreq->count = req_start;
}
+static void nfs_direct_file_adjust_size_locked(struct inode *inode,
+ loff_t offset, size_t count)
+{
+ loff_t newsize = offset + (loff_t)count;
+ loff_t oldsize = i_size_read(inode);
+
+ if (newsize > oldsize) {
+ i_size_write(inode, newsize);
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
+ trace_nfs_size_grow(inode, newsize);
+ nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
+ }
+}
+
/**
* nfs_swap_rw - NFS address space operation for swap I/O
* @iocb: target I/O control block
@@ -272,6 +287,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
nfs_direct_count_bytes(dreq, hdr);
spin_unlock(&dreq->lock);
+ nfs_update_delegated_atime(dreq->inode);
+
while (!list_empty(&hdr->pages)) {
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
struct page *page = req->wb_page;
@@ -741,6 +758,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
struct nfs_direct_req *dreq = hdr->dreq;
struct nfs_commit_info cinfo;
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ struct inode *inode = dreq->inode;
int flags = NFS_ODIRECT_DONE;
trace_nfs_direct_write_completion(dreq);
@@ -762,6 +780,11 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
}
spin_unlock(&dreq->lock);
+ spin_lock(&inode->i_lock);
+ nfs_direct_file_adjust_size_locked(inode, dreq->io_start, dreq->count);
+ nfs_update_delegated_mtime_locked(dreq->inode);
+ spin_unlock(&inode->i_lock);
+
while (!list_empty(&hdr->pages)) {
req = nfs_list_entry(hdr->pages.next);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index df9669d4ded7..6e95db6c17e9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -133,6 +133,7 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
if (err)
return NULL;
+ label->lsmid = shim.id;
label->label = shim.context;
label->len = shim.len;
return label;
@@ -145,7 +146,7 @@ nfs4_label_release_security(struct nfs4_label *label)
if (label) {
shim.context = label->label;
shim.len = label->len;
- shim.id = LSM_ID_UNDEF;
+ shim.id = label->lsmid;
security_release_secctx(&shim);
}
}
@@ -3906,8 +3907,11 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
{
+ struct dentry *dentry = ctx->dentry;
if (ctx->state == NULL)
return;
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ nfs4_inode_set_return_delegation_on_close(d_inode(dentry));
if (is_sync)
nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
else
@@ -6269,7 +6273,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs4_label label = {0, 0, buflen, buf};
+ struct nfs4_label label = {0, 0, 0, buflen, buf};
u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
struct nfs_fattr fattr = {
@@ -6374,7 +6378,7 @@ static int nfs4_do_set_security_label(struct inode *inode,
static int
nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
{
- struct nfs4_label ilabel = {0, 0, buflen, (char *)buf };
+ struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf };
struct nfs_fattr *fattr;
int status;
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 663f8656158d..f7fddf8ecf73 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -37,7 +37,6 @@ static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
}
const struct dentry_operations ns_dentry_operations = {
- .d_delete = always_delete_dentry,
.d_dname = ns_dname,
.d_prune = stashed_dentry_prune,
};
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 0c28e5fa3407..d7310fcf3888 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -618,7 +618,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
err = PTR_ERR(upper);
if (!IS_ERR(upper)) {
err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper);
- dput(upper);
if (!err) {
/* Restore timestamps on parent (best effort) */
@@ -626,6 +625,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
ovl_dentry_set_upper_alias(c->dentry);
ovl_dentry_update_reval(c->dentry, upper);
}
+ dput(upper);
}
inode_unlock(udir);
if (err)
diff --git a/fs/pidfs.c b/fs/pidfs.c
index 63f9699ebac3..c0478b3c55d9 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -521,7 +521,6 @@ static char *pidfs_dname(struct dentry *dentry, char *buffer, int buflen)
}
const struct dentry_operations pidfs_dentry_operations = {
- .d_delete = always_delete_dentry,
.d_dname = pidfs_dname,
.d_prune = stashed_dentry_prune,
};
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index f42133dae68e..2afc95bf1655 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -90,7 +90,7 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep, unsigned long sz)
{
return ptep_get_and_clear(mm, addr, ptep);
}
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 02a4adb4a999..0d5b186abee8 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -457,7 +457,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
- *(.rodata) *(.rodata.*) \
+ *(.rodata) *(.rodata.*) *(.data.rel.ro*) \
SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 248416ecd01c..d37751789bf5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -196,10 +196,11 @@ struct gendisk {
unsigned int zone_capacity;
unsigned int last_zone_capacity;
unsigned long __rcu *conv_zones_bitmap;
- unsigned int zone_wplugs_hash_bits;
- spinlock_t zone_wplugs_lock;
+ unsigned int zone_wplugs_hash_bits;
+ atomic_t nr_zone_wplugs;
+ spinlock_t zone_wplugs_lock;
struct mempool_s *zone_wplugs_pool;
- struct hlist_head *zone_wplugs_hash;
+ struct hlist_head *zone_wplugs_hash;
struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */
@@ -367,6 +368,7 @@ struct queue_limits {
unsigned int max_sectors;
unsigned int max_user_sectors;
unsigned int max_segment_size;
+ unsigned int min_segment_size;
unsigned int physical_block_size;
unsigned int logical_block_size;
unsigned int alignment_offset;
diff --git a/include/linux/call_once.h b/include/linux/call_once.h
index 6261aa0b3fb0..13cd6469e7e5 100644
--- a/include/linux/call_once.h
+++ b/include/linux/call_once.h
@@ -26,20 +26,41 @@ do { \
__once_init((once), #once, &__key); \
} while (0)
-static inline void call_once(struct once *once, void (*cb)(struct once *))
+/*
+ * call_once - Ensure a function has been called exactly once
+ *
+ * @once: Tracking struct
+ * @cb: Function to be called
+ *
+ * If @once has never completed successfully before, call @cb and, if
+ * it returns a zero or positive value, mark @once as completed. Return
+ * the value returned by @cb.
+ *
+ * If @once has completed successfully before, return 0.
+ *
+ * The call to @cb is implicitly surrounded by a mutex, though for
+ * efficiency the function avoids taking it after the first call.
+ */
+static inline int call_once(struct once *once, int (*cb)(struct once *))
{
- /* Pairs with atomic_set_release() below. */
- if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
- return;
-
- guard(mutex)(&once->lock);
- WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
- if (atomic_read(&once->state) != ONCE_NOT_STARTED)
- return;
-
- atomic_set(&once->state, ONCE_RUNNING);
- cb(once);
- atomic_set_release(&once->state, ONCE_COMPLETED);
+ int r, state;
+
+ /* Pairs with atomic_set_release() below. */
+ if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
+ return 0;
+
+ guard(mutex)(&once->lock);
+ state = atomic_read(&once->state);
+ if (unlikely(state != ONCE_NOT_STARTED))
+ return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0;
+
+ atomic_set(&once->state, ONCE_RUNNING);
+ r = cb(once);
+ if (r < 0)
+ atomic_set(&once->state, ONCE_NOT_STARTED);
+ else
+ atomic_set_release(&once->state, ONCE_COMPLETED);
+ return r;
}
#endif /* _LINUX_CALL_ONCE_H */
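
With the reworked contract, a callback that fails leaves the once in its not-started state so a later caller can retry, and the first success is latched. A hedged usage sketch; hw_probe() and the surrounding names are hypothetical, and only the call_once() semantics come from the header above:

    #include <linux/call_once.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    bool hw_probe(void);        /* hypothetical probe step */
    static struct once hw_once; /* set up with the header's init macro */

    static int hw_init(struct once *once)
    {
            /* A negative return leaves the once NOT_STARTED, so a
             * later caller retries; >= 0 latches it COMPLETED. */
            return hw_probe() ? 0 : -ENODEV;
    }

    static int hw_get(void)
    {
            return call_once(&hw_once, hw_init); /* 0 after the first success */
    }
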
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 200fd3c5bc70..155385754824 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -110,7 +110,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
/* Unreachable code */
#ifdef CONFIG_OBJTOOL
/* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #")
+#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")
#else /* !CONFIG_OBJTOOL */
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2c3b2f8a621f..2788df98080f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2975,8 +2975,8 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
} else if (iocb->ki_flags & IOCB_DONTCACHE) {
struct address_space *mapping = iocb->ki_filp->f_mapping;
- filemap_fdatawrite_range_kick(mapping, iocb->ki_pos,
- iocb->ki_pos + count);
+ filemap_fdatawrite_range_kick(mapping, iocb->ki_pos - count,
+ iocb->ki_pos - 1);
}
return count;
@@ -3452,6 +3452,8 @@ extern const struct file_operations generic_ro_fops;
extern int readlink_copy(char __user *, int, const char *, int);
extern int page_readlink(struct dentry *, char __user *, int);
+extern const char *page_get_link_raw(struct dentry *, struct inode *,
+ struct delayed_call *);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
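
The generic_write_sync() hunk above corrects an off-by-a-window range: by the time it runs, ki_pos has already been advanced past the bytes just written, and filemap_fdatawrite_range_kick() takes an inclusive end. Worked numbers as a sketch, assuming a 1024-byte write that advanced the position to 4096:

    loff_t pos = iocb->ki_pos;  /* 4096: one past the data */
    loff_t start = pos - count; /* 3072: first byte written */
    loff_t end = pos - 1;       /* 4095: last byte written, inclusive */

    filemap_fdatawrite_range_kick(mapping, start, end);
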
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ec8c0ccc8f95..bf5f7256bd28 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1004,7 +1004,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
#endif
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 71fbebfa43c7..9ac83ca88326 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -47,6 +47,7 @@ struct nfs4_acl {
struct nfs4_label {
uint32_t lfs;
uint32_t pi;
+ u32 lsmid;
u32 len;
char *label;
};
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
index 2c8bfd0f1b6b..6322d8c1c6b4 100644
--- a/include/linux/rcuref.h
+++ b/include/linux/rcuref.h
@@ -71,27 +71,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref)
return rcuref_get_slowpath(ref);
}
-extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);
+extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);
/*
* Internal helper. Do not invoke directly.
*/
static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
{
+ int cnt;
+
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
"suspicious rcuref_put_rcusafe() usage");
/*
* Unconditionally decrease the reference count. The saturation and
* dead zones provide enough tolerance for this.
*/
- if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
+ cnt = atomic_sub_return_release(1, &ref->refcnt);
+ if (likely(cnt >= 0))
return false;
/*
* Handle the last reference drop and cases inside the saturation
* and dead zones.
*/
- return rcuref_put_slowpath(ref);
+ return rcuref_put_slowpath(ref, cnt);
}
/**
diff --git a/include/linux/socket.h b/include/linux/socket.h
index d18cc47e89bd..c3322eb3d686 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -392,6 +392,8 @@ struct ucred {
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
+ void *data);
struct timespec64;
struct __kernel_timespec;
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index fec1e8a1570c..eac57914dcf3 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -158,7 +158,6 @@ enum {
RPC_TASK_NEED_XMIT,
RPC_TASK_NEED_RECV,
RPC_TASK_MSG_PIN_WAIT,
- RPC_TASK_SIGNALLED,
};
#define rpc_test_and_set_running(t) \
@@ -171,7 +170,7 @@ enum {
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
-#define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
+#define RPC_SIGNALLED(t) (READ_ONCE(task->tk_rpc_status) == -ERESTARTSYS)
/*
* Task priorities.
diff --git a/include/net/sock.h b/include/net/sock.h
index 8036b3b79cd8..7ef728324e4e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1751,6 +1751,7 @@ static inline bool sock_allow_reclassification(const struct sock *csk)
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
void sk_free(struct sock *sk);
+void sk_net_refcnt_upgrade(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
void sk_free_unlock_clone(struct sock *sk);
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 3dc7a1551ac3..5d653a3491d0 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -12,6 +12,7 @@
#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
+#include <linux/spi/spi.h>
#include <sound/cs-amp-lib.h>
#define CS35L56_DEVID 0x0000000
@@ -61,6 +62,7 @@
#define CS35L56_IRQ1_MASK_8 0x000E0AC
#define CS35L56_IRQ1_MASK_18 0x000E0D4
#define CS35L56_IRQ1_MASK_20 0x000E0DC
+#define CS35L56_DSP_MBOX_1_RAW 0x0011000
#define CS35L56_DSP_VIRTUAL1_MBOX_1 0x0011020
#define CS35L56_DSP_VIRTUAL1_MBOX_2 0x0011024
#define CS35L56_DSP_VIRTUAL1_MBOX_3 0x0011028
@@ -224,6 +226,7 @@
#define CS35L56_HALO_STATE_SHUTDOWN 1
#define CS35L56_HALO_STATE_BOOT_DONE 2
+#define CS35L56_MBOX_CMD_PING 0x0A000000
#define CS35L56_MBOX_CMD_AUDIO_PLAY 0x0B000001
#define CS35L56_MBOX_CMD_AUDIO_PAUSE 0x0B000002
#define CS35L56_MBOX_CMD_AUDIO_REINIT 0x0B000003
@@ -254,6 +257,16 @@
#define CS35L56_NUM_BULK_SUPPLIES 3
#define CS35L56_NUM_DSP_REGIONS 5
+/* Additional margin for SYSTEM_RESET to control port ready on SPI */
+#define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500)
+
+struct cs35l56_spi_payload {
+ __be32 addr;
+ __be16 pad;
+ __be32 value;
+} __packed;
+static_assert(sizeof(struct cs35l56_spi_payload) == 10);
+
struct cs35l56_base {
struct device *dev;
struct regmap *regmap;
@@ -269,6 +282,7 @@ struct cs35l56_base {
s8 cal_index;
struct cirrus_amp_cal_data cal_data;
struct gpio_desc *reset_gpio;
+ struct cs35l56_spi_payload *spi_payload_buf;
};
static inline bool cs35l56_is_otp_register(unsigned int reg)
@@ -276,6 +290,23 @@ static inline bool cs35l56_is_otp_register(unsigned int reg)
return (reg >> 16) == 3;
}
+static inline int cs35l56_init_config_for_spi(struct cs35l56_base *cs35l56,
+ struct spi_device *spi)
+{
+ cs35l56->spi_payload_buf = devm_kzalloc(&spi->dev,
+ sizeof(*cs35l56->spi_payload_buf),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs35l56->spi_payload_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline bool cs35l56_is_spi(struct cs35l56_base *cs35l56)
+{
+ return IS_ENABLED(CONFIG_SPI_MASTER) && !!cs35l56->spi_payload_buf;
+}
+
extern const struct regmap_config cs35l56_regmap_i2c;
extern const struct regmap_config cs35l56_regmap_spi;
extern const struct regmap_config cs35l56_regmap_sdw;
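The new cs35l56_spi_payload packs a big-endian register address, two pad bytes and a big-endian value into a single 10-byte wire image (which the static_assert pins down), and cs35l56_init_config_for_spi() allocates it with GFP_DMA so controllers that DMA straight from the buffer are safe. cs35l56_is_spi() then doubles as a feature test: only the SPI probe paths set the pointer, and IS_ENABLED(CONFIG_SPI_MASTER) lets the compiler drop the SPI branches on I2C/SoundWire-only builds. A quick userspace check of the layout assumption (__be32/__be16 modeled with plain stdint types):

    #include <stdint.h>

    struct cs35l56_spi_payload {
        uint32_t addr;   /* __be32 in the header */
        uint16_t pad;    /* __be16 */
        uint32_t value;  /* __be32 */
    } __attribute__((packed));

    int main(void)
    {
        /* mirrors the header's static_assert: packed, no padding */
        _Static_assert(sizeof(struct cs35l56_spi_payload) == 10, "layout");
        return 0;
    }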
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index b0db89058c91..958a2460330c 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -174,6 +174,7 @@ enum yfs_cm_operation {
EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \
EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \
EM(afs_cell_trace_get_queue_new, "GET q-new ") \
+ EM(afs_cell_trace_get_server, "GET server") \
EM(afs_cell_trace_get_vol, "GET vol ") \
EM(afs_cell_trace_insert, "INSERT ") \
EM(afs_cell_trace_manage, "MANAGE ") \
@@ -182,6 +183,7 @@ enum yfs_cm_operation {
EM(afs_cell_trace_put_destroy, "PUT destry") \
EM(afs_cell_trace_put_queue_work, "PUT q-work") \
EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \
+ EM(afs_cell_trace_put_server, "PUT server") \
EM(afs_cell_trace_put_vol, "PUT vol ") \
EM(afs_cell_trace_see_source, "SEE source") \
EM(afs_cell_trace_see_ws, "SEE ws ") \
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index b13dc275ef4a..851841336ee6 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -360,8 +360,7 @@ TRACE_EVENT(rpc_request,
{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \
{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \
{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \
- { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }, \
- { (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" })
+ { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })
DECLARE_EVENT_CLASS(rpc_task_running,
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 33745642f787..e1d2c27533b4 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -268,7 +268,9 @@ struct landlock_net_port_attr {
* ~~~~~~~~~~~~~~~~
*
* These flags enable to restrict a sandboxed process to a set of network
- * actions. This is supported since the Landlock ABI version 4.
+ * actions.
+ *
+ * This is supported since Landlock ABI version 4.
*
* The following access rights apply to TCP port numbers:
*
@@ -291,11 +293,13 @@ struct landlock_net_port_attr {
* Setting a flag for a ruleset will isolate the Landlock domain to forbid
* connections to resources outside the domain.
*
+ * This is supported since Landlock ABI version 6.
+ *
* Scopes:
*
* - %LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET: Restrict a sandboxed process from
* connecting to an abstract UNIX socket created by a process outside the
- * related Landlock domain (e.g. a parent domain or a non-sandboxed process).
+ * related Landlock domain (e.g., a parent domain or a non-sandboxed process).
* - %LANDLOCK_SCOPE_SIGNAL: Restrict a sandboxed process from sending a signal
* to another process outside the domain.
*/
diff --git a/io_uring/net.c b/io_uring/net.c
index 17852a6616ff..5d0b56ff50ee 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -322,7 +322,9 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
if (unlikely(ret))
return ret;
- return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+ ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+ sr->msg_control = iomsg->msg.msg_control_user;
+ return ret;
}
#endif
diff --git a/kernel/events/core.c b/kernel/events/core.c
index bcb09e011e9e..6364319e2f88 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4950,7 +4950,7 @@ static struct perf_event_pmu_context *
find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
struct perf_event *event)
{
- struct perf_event_pmu_context *new = NULL, *epc;
+ struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc;
void *task_ctx_data = NULL;
if (!ctx->task) {
@@ -5007,12 +5007,19 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
atomic_inc(&epc->refcount);
goto found_epc;
}
+ /* Make sure the pmu_ctx_list is sorted by PMU type: */
+ if (!pos && epc->pmu->type > pmu->type)
+ pos = epc;
}
epc = new;
new = NULL;
- list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
+ if (!pos)
+ list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
+ else
+ list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev);
+
epc->ctx = ctx;
found_epc:
@@ -5962,14 +5969,15 @@ static int _perf_event_period(struct perf_event *event, u64 value)
if (!value)
return -EINVAL;
- if (event->attr.freq && value > sysctl_perf_event_sample_rate)
- return -EINVAL;
-
- if (perf_event_check_period(event, value))
- return -EINVAL;
-
- if (!event->attr.freq && (value & (1ULL << 63)))
- return -EINVAL;
+ if (event->attr.freq) {
+ if (value > sysctl_perf_event_sample_rate)
+ return -EINVAL;
+ } else {
+ if (perf_event_check_period(event, value))
+ return -EINVAL;
+ if (value & (1ULL << 63))
+ return -EINVAL;
+ }
event_function_call(event, __perf_event_period, &value);
@@ -8321,7 +8329,8 @@ void perf_event_exec(void)
perf_event_enable_on_exec(ctx);
perf_event_remove_on_exec(ctx);
- perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
+ scoped_guard(rcu)
+ perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
perf_unpin_context(ctx);
put_ctx(ctx);
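Three separate fixes in this file; the structurally interesting one is that ctx->pmu_ctx_list is now kept sorted by PMU type: the lookup loop records the first entry whose type is larger (pos), and list_add(..., pos->pmu_ctx_entry.prev) links the new epc immediately before it, with list_add_tail() covering the append case. A self-contained sketch of that insert-before-pos idiom on a circular doubly linked list (toy types, not the kernel's list.h):

    #include <stdio.h>

    struct node { int type; struct node *prev, *next; };

    static void link_between(struct node *n, struct node *prev,
                             struct node *next)
    {
        n->prev = prev; n->next = next;
        prev->next = n; next->prev = n;
    }

    /* Stop at the first entry with a larger type and link the new node
     * in front of it; falling off the end appends at the tail. */
    static void insert_sorted(struct node *head, struct node *n)
    {
        struct node *pos = head->next;

        while (pos != head && pos->type <= n->type)
            pos = pos->next;
        link_between(n, pos->prev, pos);
    }

    int main(void)
    {
        struct node head = { 0, &head, &head };
        struct node a = { .type = 8 }, b = { .type = 4 }, c = { .type = 6 };

        insert_sorted(&head, &a);
        insert_sorted(&head, &b);
        insert_sorted(&head, &c);
        for (struct node *p = head.next; p != &head; p = p->next)
            printf("%d ", p->type);   /* 4 6 8 */
        return 0;
    }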
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index bf2a87a0a378..b4ca8898fe17 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -495,6 +495,11 @@ retry:
if (ret <= 0)
goto put_old;
+ if (is_zero_page(old_page)) {
+ ret = -EINVAL;
+ goto put_old;
+ }
+
if (WARN(!is_register && PageCompound(old_page),
"uprobe unregister should never work on compound page\n")) {
ret = -EINVAL;
@@ -762,10 +767,14 @@ static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get)
enum hprobe_state hstate;
/*
- * return_instance's hprobe is protected by RCU.
- * Underlying uprobe is itself protected from reuse by SRCU.
+ * Caller should guarantee that return_instance is not going to be
+ * freed from under us. This can be achieved either through holding
+ * rcu_read_lock() or by owning return_instance in the first place.
+ *
+ * Underlying uprobe is itself protected from reuse by SRCU, so ensure
+ * SRCU lock is held properly.
*/
- lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu));
+ lockdep_assert(srcu_read_lock_held(&uretprobes_srcu));
hstate = READ_ONCE(hprobe->state);
switch (hstate) {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9aecd914ac69..67189907214d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7285,7 +7285,7 @@ out_unlock:
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
- if (should_resched(0)) {
+ if (should_resched(0) && !irqs_disabled()) {
preempt_schedule_common();
return 1;
}
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 5a81d9a1e31f..0f1da199cfc7 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3117,7 +3117,6 @@ static struct task_struct *pick_task_scx(struct rq *rq)
{
struct task_struct *prev = rq->curr;
struct task_struct *p;
- bool prev_on_scx = prev->sched_class == &ext_sched_class;
bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
bool kick_idle = false;
@@ -3137,14 +3136,18 @@ static struct task_struct *pick_task_scx(struct rq *rq)
* if pick_task_scx() is called without preceding balance_scx().
*/
if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
- if (prev_on_scx) {
+ if (prev->scx.flags & SCX_TASK_QUEUED) {
keep_prev = true;
} else {
keep_prev = false;
kick_idle = true;
}
- } else if (unlikely(keep_prev && !prev_on_scx)) {
- /* only allowed during transitions */
+ } else if (unlikely(keep_prev &&
+ prev->sched_class != &ext_sched_class)) {
+ /*
+ * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
+ * conditional on scx_enabled() and may have been skipped.
+ */
WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
keep_prev = false;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6b0c25761ccb..fc88e0688daf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -540,6 +540,7 @@ static int function_stat_show(struct seq_file *m, void *v)
static struct trace_seq s;
unsigned long long avg;
unsigned long long stddev;
+ unsigned long long stddev_denom;
#endif
guard(mutex)(&ftrace_profile_lock);
@@ -559,23 +560,19 @@ static int function_stat_show(struct seq_file *m, void *v)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
seq_puts(m, " ");
- /* Sample standard deviation (s^2) */
- if (rec->counter <= 1)
- stddev = 0;
- else {
- /*
- * Apply Welford's method:
- * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
- */
+ /*
+ * Variance formula:
+ * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+ * Maybe Welford's method is better here?
+ * Divide only by 1000 for ns^2 -> us^2 conversion.
+ * trace_print_graph_duration will divide by 1000 again.
+ */
+ stddev = 0;
+ stddev_denom = rec->counter * (rec->counter - 1) * 1000;
+ if (stddev_denom) {
stddev = rec->counter * rec->time_squared -
rec->time * rec->time;
-
- /*
- * Divide only 1000 for ns^2 -> us^2 conversion.
- * trace_print_graph_duration will divide 1000 again.
- */
- stddev = div64_ul(stddev,
- rec->counter * (rec->counter - 1) * 1000);
+ stddev = div64_ul(stddev, stddev_denom);
}
trace_seq_init(&s);
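The rewritten stddev computation keeps the same variance formula, s^2 = (n * sum(x_i^2) - (sum x_i)^2) / (n * (n - 1)), but folds the old n <= 1 special case into a single denominator test (n of 0 or 1 makes the denominator 0, so stddev stays 0) and rolls the ns^2-to-us^2 conversion's factor of 1000 into the same division. A standalone check with assumed sample values:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long long x[] = { 1000, 2000, 3000, 4000 }; /* ns */
        unsigned long long n = 4, sum = 0, sum_sq = 0;

        for (int i = 0; i < 4; i++) {
            sum += x[i];
            sum_sq += x[i] * x[i];
        }

        /* the extra * 1000 performs the ns^2 -> us^2 step */
        unsigned long long denom = n * (n - 1) * 1000;
        unsigned long long s2 = denom ? (n * sum_sq - sum * sum) / denom : 0;

        printf("%llu\n", s2);   /* (4*30e6 - 1e8) / 12000 = 1666 */
        return 0;
    }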
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 261163b00137..ad7419e24055 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -6724,27 +6724,27 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
if (existing_hist_update_only(glob, trigger_data, file))
goto out_free;
- ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
- if (ret < 0)
- goto out_free;
+ if (!get_named_trigger_data(trigger_data)) {
- if (get_named_trigger_data(trigger_data))
- goto enable;
+ ret = create_actions(hist_data);
+ if (ret)
+ goto out_free;
- ret = create_actions(hist_data);
- if (ret)
- goto out_unreg;
+ if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+ ret = save_hist_vars(hist_data);
+ if (ret)
+ goto out_free;
+ }
- if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
- ret = save_hist_vars(hist_data);
+ ret = tracing_map_init(hist_data->map);
if (ret)
- goto out_unreg;
+ goto out_free;
}
- ret = tracing_map_init(hist_data->map);
- if (ret)
- goto out_unreg;
-enable:
+ ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+ if (ret < 0)
+ goto out_free;
+
ret = hist_trigger_enable(trigger_data, file);
if (ret)
goto out_unreg;
diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
index 8800f5acc007..2ef2e1b80091 100644
--- a/kernel/vhost_task.c
+++ b/kernel/vhost_task.c
@@ -133,7 +133,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *),
vtsk = kzalloc(sizeof(*vtsk), GFP_KERNEL);
if (!vtsk)
- return NULL;
+ return ERR_PTR(-ENOMEM);
init_completion(&vtsk->exited);
mutex_init(&vtsk->exit_mutex);
vtsk->data = arg;
@@ -145,7 +145,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *),
tsk = copy_process(NULL, 0, NUMA_NO_NODE, &args);
if (IS_ERR(tsk)) {
kfree(vtsk);
- return NULL;
+ return ERR_PTR(PTR_ERR(tsk));
}
vtsk->task = tsk;
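Switching vhost_task_create() from NULL to ERR_PTR() returns lets callers see why creation failed (-ENOMEM vs. whatever copy_process() reported) instead of one indistinct failure; callers must now test with IS_ERR() rather than !vtsk. A userspace model of the convention from <linux/err.h> (simplified; LP64 assumed for the casts):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        /* errno values live in the top page of the address space */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static int the_object;

    static void *create_like_vhost_task(int fail)
    {
        if (fail)
            return ERR_PTR(-ENOMEM);   /* was: return NULL */
        return &the_object;
    }

    int main(void)
    {
        void *vtsk = create_like_vhost_task(1);

        if (IS_ERR(vtsk)) {   /* callers check IS_ERR(), not !vtsk */
            printf("create failed: %ld\n", PTR_ERR(vtsk));  /* -12 */
            return 1;
        }
        return 0;
    }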
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 97152f2250fe..bfe030b443e2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2254,8 +2254,10 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queues a new work item to a wq after destroy_workqueue(wq).
*/
if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
- WARN_ON_ONCE(!is_chained_work(wq))))
+ WARN_ONCE(!is_chained_work(wq), "workqueue: cannot queue %ps on wq %s\n",
+ work->func, wq->name))) {
return;
+ }
rcu_read_lock();
retry:
/* pwq which will be used unless @work is executing elsewhere */
diff --git a/lib/rcuref.c b/lib/rcuref.c
index 97f300eca927..5bd726b71e39 100644
--- a/lib/rcuref.c
+++ b/lib/rcuref.c
@@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
/**
* rcuref_put_slowpath - Slowpath of __rcuref_put()
* @ref: Pointer to the reference count
+ * @cnt: The resulting value of the fastpath decrement
*
* Invoked when the reference count is outside of the valid zone.
*
@@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
* with a concurrent get()/put() pair. Caller is not allowed to
* deconstruct the protected object.
*/
-bool rcuref_put_slowpath(rcuref_t *ref)
+bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt)
{
- unsigned int cnt = atomic_read(&ref->refcnt);
-
/* Did this drop the last reference? */
if (likely(cnt == RCUREF_NOREF)) {
/*
diff --git a/mm/filemap.c b/mm/filemap.c
index 804d7365680c..d4564a79eb35 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -445,7 +445,7 @@ EXPORT_SYMBOL(filemap_fdatawrite_range);
* filemap_fdatawrite_range_kick - start writeback on a range
* @mapping: target address_space
* @start: index to start writeback on
- * @end: last (non-inclusive) index for writeback
+ * @end: last (inclusive) index for writeback
*
* This is a non-integrity writeback helper, to start writing back folios
* for the indicated range.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 163190e89ea1..09d6673040ed 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5447,7 +5447,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
if (src_ptl != dst_ptl)
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
- pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
+ pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);
if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
huge_pte_clear(mm, new_addr, dst_pte, sz);
@@ -5622,7 +5622,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
}
- pte = huge_ptep_get_and_clear(mm, address, ptep);
+ pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
if (huge_pte_dirty(pte))
set_page_dirty(page);
diff --git a/mm/truncate.c b/mm/truncate.c
index e2e115adfbc5..76d8fcd89bd0 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -548,8 +548,6 @@ int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- if (folio_test_dirty(folio))
- return 0;
if (folio_mapped(folio))
unmap_mapping_folio(folio);
BUG_ON(folio_mapped(folio));
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index fec11e576f31..b22078b67972 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -632,7 +632,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
hci_conn_hold(conn->hcon);
- list_add(&chan->list, &conn->chan_l);
+ /* Append to the list since the order matters for ECRED */
+ list_add_tail(&chan->list, &conn->chan_l);
}
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
@@ -3771,7 +3772,11 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
struct l2cap_ecred_conn_rsp *rsp_flex =
container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);
- if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+ /* Check if channel for outgoing connection or if it wasn't deferred
+ * since in those cases it must be skipped.
+ */
+ if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
+ !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
return;
/* Reset ident so only one response is sent */
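The list_add() to list_add_tail() switch matters because ECRED connection responses must report channels in the order they were added; prepending gave reverse (LIFO) traversal. A toy illustration of the two orderings with a minimal singly linked list:

    #include <stdio.h>

    struct chan { int id; struct chan *next; };

    /* list_add(): push-front, iteration sees newest first */
    static void add_front(struct chan **head, struct chan *c)
    {
        c->next = *head;
        *head = c;
    }

    /* list_add_tail(): append, iteration preserves insertion order */
    static void add_tail(struct chan **head, struct chan *c)
    {
        struct chan **p = head;

        while (*p)
            p = &(*p)->next;
        c->next = NULL;
        *p = c;
    }

    int main(void)
    {
        struct chan a = { .id = 1 }, b = { .id = 2 };
        struct chan c = { .id = 1 }, d = { .id = 2 };
        struct chan *fifo = NULL, *lifo = NULL;

        add_tail(&fifo, &a); add_tail(&fifo, &b);
        add_front(&lifo, &c); add_front(&lifo, &d);
        for (struct chan *p = fifo; p; p = p->next) printf("%d ", p->id);
        printf("| ");
        for (struct chan *p = lifo; p; p = p->next) printf("%d ", p->id);
        printf("\n");   /* 1 2 | 2 1 */
        return 0;
    }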
diff --git a/net/core/dev.c b/net/core/dev.c
index 1b252e9459fd..30da277c5a6f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2141,21 +2141,15 @@ int register_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn)
{
- struct net *net = dev_net(dev);
int err;
- /* rtnl_net_lock() assumes dev is not yet published by
- * register_netdevice().
- */
- DEBUG_NET_WARN_ON_ONCE(!list_empty(&dev->dev_list));
-
- rtnl_net_lock(net);
- err = __register_netdevice_notifier_net(net, nb, false);
+ rtnl_net_dev_lock(dev);
+ err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
if (!err) {
nn->nb = nb;
list_add(&nn->list, &dev->net_notifier_list);
}
- rtnl_net_unlock(net);
+ rtnl_net_dev_unlock(dev);
return err;
}
@@ -4763,7 +4757,7 @@ use_local_napi:
* we have to raise NET_RX_SOFTIRQ.
*/
if (!sd->in_net_rx_action)
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS
diff --git a/net/core/gro.c b/net/core/gro.c
index 78b320b63174..0ad549b07e03 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -653,6 +653,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
+ skb->ip_summed = CHECKSUM_NONE;
skb_shinfo(skb)->gso_type = 0;
skb_shinfo(skb)->gso_size = 0;
if (unlikely(skb->slow_gro)) {
diff --git a/net/core/scm.c b/net/core/scm.c
index 4f6a14babe5a..733c0cbd393d 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -282,6 +282,16 @@ efault:
}
EXPORT_SYMBOL(put_cmsg);
+int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
+ void *data)
+{
+ /* Don't produce truncated CMSGs */
+ if (!msg->msg_control || msg->msg_controllen < CMSG_LEN(len))
+ return -ETOOSMALL;
+
+ return put_cmsg(msg, level, type, len, data);
+}
+
void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
{
struct scm_timestamping64 tss;
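put_cmsg() copies as much as fits and flags the shortfall with MSG_CTRUNC; the new wrapper refuses to emit a truncated control message at all, failing with -ETOOSMALL when msg_control cannot hold CMSG_LEN(len). The tcp_recvmsg_dmabuf() hunks further down then replace their open-coded "clear MSG_CTRUNC and map to -ETOOSMALL" sequences with single calls. A userspace view of the size the check enforces (CMSG_LEN from <sys/socket.h>):

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
        /* a cmsg carrying a 16-byte payload needs this much control
         * buffer; anything smaller would previously arrive truncated
         * with MSG_CTRUNC set */
        printf("need %zu bytes of msg_control\n", (size_t)CMSG_LEN(16));
        return 0;
    }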
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7b03b64fdcb2..b1c81687e9d8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6033,11 +6033,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->offload_fwd_mark = 0;
skb->offload_l3_fwd_mark = 0;
#endif
+ ipvs_reset(skb);
if (!xnet)
return;
- ipvs_reset(skb);
skb->mark = 0;
skb_clear_tstamp(skb);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index eae2ae70a2e0..6c0e87f97fa4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2246,6 +2246,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
get_net_track(net, &sk->ns_tracker, priority);
sock_inuse_add(net, 1);
} else {
+ net_passive_inc(net);
__netns_tracker_alloc(net, &sk->ns_tracker,
false, priority);
}
@@ -2270,6 +2271,7 @@ EXPORT_SYMBOL(sk_alloc);
static void __sk_destruct(struct rcu_head *head)
{
struct sock *sk = container_of(head, struct sock, sk_rcu);
+ struct net *net = sock_net(sk);
struct sk_filter *filter;
if (sk->sk_destruct)
@@ -2301,14 +2303,28 @@ static void __sk_destruct(struct rcu_head *head)
put_cred(sk->sk_peer_cred);
put_pid(sk->sk_peer_pid);
- if (likely(sk->sk_net_refcnt))
- put_net_track(sock_net(sk), &sk->ns_tracker);
- else
- __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
-
+ if (likely(sk->sk_net_refcnt)) {
+ put_net_track(net, &sk->ns_tracker);
+ } else {
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+ net_passive_dec(net);
+ }
sk_prot_free(sk->sk_prot_creator, sk);
}
+void sk_net_refcnt_upgrade(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+
+ WARN_ON_ONCE(sk->sk_net_refcnt);
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+ net_passive_dec(net);
+ sk->sk_net_refcnt = 1;
+ get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+ sock_inuse_add(net, 1);
+}
+EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade);
+
void sk_destruct(struct sock *sk)
{
bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
@@ -2405,6 +2421,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
* is not properly dismantling its kernel sockets at netns
* destroy time.
*/
+ net_passive_inc(sock_net(newsk));
__netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
false, priority);
}
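sk_net_refcnt_upgrade() centralizes a sequence that mptcp, rds, smc and the two sunrpc socket paths below had each open-coded, and pairs it with the new net_passive_inc()/net_passive_dec() accounting for sockets holding only a passive netns reference. The before/after shape at a call site, taken directly from the removed and added hunks:

    /* before (open-coded at each call site): */
    __netns_tracker_free(net, &sk->ns_tracker, false);
    sk->sk_net_refcnt = 1;
    get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
    sock_inuse_add(net, 1);

    /* after: */
    sk_net_refcnt_upgrade(sk);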
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index ad2741f1346a..c7769ee0d9c5 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -34,6 +34,7 @@ static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
+static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -587,7 +588,7 @@ static struct ctl_table net_core_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
+ .extra1 = &netdev_budget_usecs_min,
},
{
.procname = "fb_tunnels_only_for_init_net",
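The new floor for netdev_budget_usecs is two scheduler ticks expressed in microseconds, 2 * USEC_PER_SEC / HZ, so the softirq budget can no longer be set below what NAPI polling can honor. A quick arithmetic check:

    #include <stdio.h>

    #define USEC_PER_SEC 1000000L

    int main(void)
    {
        for (long hz = 100; hz <= 1000; hz *= 10)
            printf("HZ=%-4ld -> min netdev_budget_usecs = %ld\n",
                   hz, 2 * USEC_PER_SEC / hz);
        return 0;   /* 20000 at HZ=100, 2000 at HZ=1000 */
    }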
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index d88e9080643b..b97374b508f6 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -6,6 +6,7 @@
#include <linux/rtnetlink.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/phy_link_topology.h>
+#include <net/netdev_queues.h>
#include "netlink.h"
#include "common.h"
@@ -771,6 +772,21 @@ int ethtool_check_ops(const struct ethtool_ops *ops)
return 0;
}
+void ethtool_ringparam_get_cfg(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kparam,
+ struct netlink_ext_ack *extack)
+{
+ memset(param, 0, sizeof(*param));
+ memset(kparam, 0, sizeof(*kparam));
+
+ param->cmd = ETHTOOL_GRINGPARAM;
+ dev->ethtool_ops->get_ringparam(dev, param, kparam, extack);
+
+ /* Driver gives us current state, we want to return current config */
+ kparam->tcp_data_split = dev->cfg->hds_config;
+}
+
static void ethtool_init_tsinfo(struct kernel_ethtool_ts_info *info)
{
memset(info, 0, sizeof(*info));
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index 58e9e7db06f9..a1088c2441d0 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -51,6 +51,12 @@ int ethtool_check_max_channel(struct net_device *dev,
struct ethtool_channels channels,
struct genl_info *info);
int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context);
+
+void ethtool_ringparam_get_cfg(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kparam,
+ struct netlink_ext_ack *extack);
+
int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info);
int ethtool_get_ts_info_by_phc(struct net_device *dev,
struct kernel_ethtool_ts_info *info,
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 7609ce2b2c5e..1c3ba2247776 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -2059,8 +2059,8 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
struct kernel_ethtool_ringparam kernel_ringparam;
+ struct ethtool_ringparam ringparam, max;
int ret;
if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
@@ -2069,7 +2069,7 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
return -EFAULT;
- dev->ethtool_ops->get_ringparam(dev, &max, &kernel_ringparam, NULL);
+ ethtool_ringparam_get_cfg(dev, &max, &kernel_ringparam, NULL);
/* ensure new ring parameters are within the maximums */
if (ringparam.rx_pending > max.rx_max_pending ||
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
index 7839bfd1ac6a..aeedd5ec6b8c 100644
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -215,17 +215,16 @@ ethnl_set_rings_validate(struct ethnl_req_info *req_info,
static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
- struct kernel_ethtool_ringparam kernel_ringparam = {};
- struct ethtool_ringparam ringparam = {};
+ struct kernel_ethtool_ringparam kernel_ringparam;
struct net_device *dev = req_info->dev;
+ struct ethtool_ringparam ringparam;
struct nlattr **tb = info->attrs;
const struct nlattr *err_attr;
bool mod = false;
int ret;
- dev->ethtool_ops->get_ringparam(dev, &ringparam,
- &kernel_ringparam, info->extack);
- kernel_ringparam.tcp_data_split = dev->cfg->hds_config;
+ ethtool_ringparam_get_cfg(dev, &ringparam, &kernel_ringparam,
+ info->extack);
ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
ethnl_update_u32(&ringparam.rx_mini_pending,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 285678d8ce07..57df7c1d2faa 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2457,14 +2457,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
*/
memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
dmabuf_cmsg.frag_size = copy;
- err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_LINEAR,
- sizeof(dmabuf_cmsg), &dmabuf_cmsg);
- if (err || msg->msg_flags & MSG_CTRUNC) {
- msg->msg_flags &= ~MSG_CTRUNC;
- if (!err)
- err = -ETOOSMALL;
+ err = put_cmsg_notrunc(msg, SOL_SOCKET,
+ SO_DEVMEM_LINEAR,
+ sizeof(dmabuf_cmsg),
+ &dmabuf_cmsg);
+ if (err)
goto out;
- }
sent += copy;
@@ -2518,16 +2516,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
offset += copy;
remaining_len -= copy;
- err = put_cmsg(msg, SOL_SOCKET,
- SO_DEVMEM_DMABUF,
- sizeof(dmabuf_cmsg),
- &dmabuf_cmsg);
- if (err || msg->msg_flags & MSG_CTRUNC) {
- msg->msg_flags &= ~MSG_CTRUNC;
- if (!err)
- err = -ETOOSMALL;
+ err = put_cmsg_notrunc(msg, SOL_SOCKET,
+ SO_DEVMEM_DMABUF,
+ sizeof(dmabuf_cmsg),
+ &dmabuf_cmsg);
+ if (err)
goto out;
- }
atomic_long_inc(&niov->pp_ref_count);
tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b089b08e9617..dfdb7a4608a8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -815,12 +815,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
/* In sequence, PAWS is OK. */
- /* TODO: We probably should defer ts_recent change once
- * we take ownership of @req.
- */
- if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
- WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
-
if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
/* Truncate SYN, it is out of window starting
at tcp_rsk(req)->rcv_isn + 1. */
@@ -869,6 +863,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!child)
goto listen_overflow;
+ if (own_req && tmp_opt.saw_tstamp &&
+ !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
+ tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;
+
if (own_req && rsk_drop_req(req)) {
reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
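This resolves the removed TODO: ts_recent is no longer written into the shared request socket before ownership is settled; the value is applied to the child socket only when own_req is true, so a thread that loses the race can no longer clobber the timestamp state. The guard condition uses TCP's wrap-safe after(); a userspace model of that comparison:

    #include <stdint.h>
    #include <stdio.h>

    /* tcp's after(): serial-number arithmetic over the 32-bit space */
    static int after(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
        /* ts_recent is only taken when the segment does not start
         * beyond rcv_nxt -- i.e. !after(seq, rcv_nxt) */
        printf("%d %d\n", after(1000, 2000),        /* 0 */
               after(5, 0xfffffffbu));              /* 1: wrapped */
        return 0;
    }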
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index 0ac4283acdf2..7c05ac846646 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -262,10 +262,18 @@ static int rpl_input(struct sk_buff *skb)
{
struct dst_entry *orig_dst = skb_dst(skb);
struct dst_entry *dst = NULL;
+ struct lwtunnel_state *lwtst;
struct rpl_lwt *rlwt;
int err;
- rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+ /* We cannot dereference "orig_dst" once ip6_route_input() or
+ * skb_dst_drop() is called. However, in order to detect a dst loop, we
+ * need the address of its lwtstate. So, save the address of lwtstate
+ * now and use it later as a comparison.
+ */
+ lwtst = orig_dst->lwtstate;
+
+ rlwt = rpl_lwt_lwtunnel(lwtst);
local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
@@ -280,7 +288,9 @@ static int rpl_input(struct sk_buff *skb)
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
- if (!dst->error) {
+
+ /* cache only if we don't create a dst reference loop */
+ if (!dst->error && lwtst != dst->lwtstate) {
local_bh_disable();
dst_cache_set_ip6(&rlwt->cache, dst,
&ipv6_hdr(skb)->saddr);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 33833b2064c0..51583461ae29 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -472,10 +472,18 @@ static int seg6_input_core(struct net *net, struct sock *sk,
{
struct dst_entry *orig_dst = skb_dst(skb);
struct dst_entry *dst = NULL;
+ struct lwtunnel_state *lwtst;
struct seg6_lwt *slwt;
int err;
- slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ /* We cannot dereference "orig_dst" once ip6_route_input() or
+ * skb_dst_drop() is called. However, in order to detect a dst loop, we
+ * need the address of its lwtstate. So, save the address of lwtstate
+ * now and use it later as a comparison.
+ */
+ lwtst = orig_dst->lwtstate;
+
+ slwt = seg6_lwt_lwtunnel(lwtst);
local_bh_disable();
dst = dst_cache_get(&slwt->cache);
@@ -490,7 +498,9 @@ static int seg6_input_core(struct net *net, struct sock *sk,
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
- if (!dst->error) {
+
+ /* cache only if we don't create a dst reference loop */
+ if (!dst->error && lwtst != dst->lwtstate) {
local_bh_disable();
dst_cache_set_ip6(&slwt->cache, dst,
&ipv6_hdr(skb)->saddr);
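rpl_input() and seg6_input_core() get the same fix: ip6_route_input() may drop and replace the skb's dst, invalidating orig_dst, so the lwtstate pointer is captured up front, and the new dst is cached only when its lwtstate differs; a cached dst whose lwtstate is this same tunnel would otherwise pin itself in the dst cache and form a reference loop. The shape of the guard, with toy types:

    #include <stdio.h>

    struct lwtstate { int dummy; };
    struct dst { int error; struct lwtstate *lwtstate; };

    /* cache only if we don't create a dst reference loop */
    static int should_cache(const struct dst *new_dst,
                            const struct lwtstate *orig)
    {
        return !new_dst->error && new_dst->lwtstate != orig;
    }

    int main(void)
    {
        struct lwtstate a, b;
        struct dst loops = { 0, &a }, fine = { 0, &b };

        printf("%d %d\n", should_cache(&loops, &a),   /* 0 */
               should_cache(&fine, &a));              /* 1 */
        return 0;
    }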
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 572d160edca3..c0e47f4f7b1a 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -1514,11 +1514,6 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
if (mptcp_pm_is_userspace(msk))
goto next;
- if (list_empty(&msk->conn_list)) {
- mptcp_pm_remove_anno_addr(msk, addr, false);
- goto next;
- }
-
lock_sock(sk);
remove_subflow = mptcp_lookup_subflow_by_saddr(&msk->conn_list, addr);
mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index f6a207958459..ad21925af061 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -1199,6 +1199,8 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
pr_debug("TCP fallback already done (msk=%p)\n", msk);
return;
}
+ if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback)))
+ return;
set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index fd021cf8286e..9f18217dddc8 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1142,7 +1142,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
if (data_len == 0) {
pr_debug("infinite mapping received\n");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
- subflow->map_data_len = 0;
return MAPPING_INVALID;
}
@@ -1286,18 +1285,6 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
mptcp_schedule_work(sk);
}
-static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
-{
- struct mptcp_sock *msk = mptcp_sk(subflow->conn);
-
- if (subflow->mp_join)
- return false;
- else if (READ_ONCE(msk->csum_enabled))
- return !subflow->valid_csum_seen;
- else
- return READ_ONCE(msk->allow_infinite_fallback);
-}
-
static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -1393,7 +1380,7 @@ fallback:
return true;
}
- if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
+ if (!READ_ONCE(msk->allow_infinite_fallback)) {
/* fatal protocol error, close the socket.
* subflow_error_report() will introduce the appropriate barriers
*/
@@ -1772,10 +1759,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
* needs it.
* Update ns_tracker to current stack trace and refcounted tracker.
*/
- __netns_tracker_free(net, &sf->sk->ns_tracker, false);
- sf->sk->sk_net_refcnt = 1;
- get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sf->sk);
err = tcp_set_ulp(sf->sk, "mptcp");
if (err)
goto err_free;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 85311226183a..a53ea60d0a78 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -795,16 +795,6 @@ static int netlink_release(struct socket *sock)
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
- /* Because struct net might disappear soon, do not keep a pointer. */
- if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) {
- __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
- /* Because of deferred_put_nlk_sk and use of work queue,
- * it is possible netns will be freed before this socket.
- */
- sock_net_set(sk, &init_net);
- __netns_tracker_alloc(&init_net, &sk->ns_tracker,
- false, GFP_KERNEL);
- }
call_rcu(&nlk->rcu, deferred_put_nlk_sk);
return 0;
}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 0581c53e6517..3cc2f303bf78 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -504,12 +504,8 @@ bool rds_tcp_tune(struct socket *sock)
release_sock(sk);
return false;
}
- /* Update ns_tracker to current stack trace and refcounted tracker */
- __netns_tracker_free(net, &sk->ns_tracker, false);
-
- sk->sk_net_refcnt = 1;
- netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sk);
+ put_net(net);
}
rtn = net_generic(net, rds_tcp_netid);
if (rtn->sndbuf_size > 0) {
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 5e740c486203..a64a0cab1bf7 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -360,7 +360,6 @@ struct rxrpc_peer {
u8 pmtud_jumbo; /* Max jumbo packets for the MTU */
bool ackr_adv_pmtud; /* T if the peer advertises path-MTU */
unsigned int ackr_max_data; /* Maximum data advertised by peer */
- seqcount_t mtu_lock; /* Lockless MTU access management */
unsigned int if_mtu; /* Local interface MTU (- hdrsize) for this peer */
unsigned int max_data; /* Maximum packet data capacity for this peer */
unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9047ba13bd31..24aceb183c2c 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -810,9 +810,7 @@ static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb
if (max_mtu < peer->max_data) {
trace_rxrpc_pmtud_reduce(peer, sp->hdr.serial, max_mtu,
rxrpc_pmtud_reduce_ack);
- write_seqcount_begin(&peer->mtu_lock);
peer->max_data = max_mtu;
- write_seqcount_end(&peer->mtu_lock);
}
max_data = umin(max_mtu, peer->max_data);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index bc283da9ee40..7f4729234957 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -130,9 +130,7 @@ static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
peer->pmtud_bad = max_data + 1;
trace_rxrpc_pmtud_reduce(peer, 0, max_data, rxrpc_pmtud_reduce_icmp);
- write_seqcount_begin(&peer->mtu_lock);
peer->max_data = max_data;
- write_seqcount_end(&peer->mtu_lock);
}
}
@@ -408,13 +406,8 @@ void rxrpc_input_probe_for_pmtud(struct rxrpc_connection *conn, rxrpc_serial_t a
}
max_data = umin(max_data, peer->ackr_max_data);
- if (max_data != peer->max_data) {
- preempt_disable();
- write_seqcount_begin(&peer->mtu_lock);
+ if (max_data != peer->max_data)
peer->max_data = max_data;
- write_seqcount_end(&peer->mtu_lock);
- preempt_enable();
- }
jumbo = max_data + sizeof(struct rxrpc_jumbo_header);
jumbo /= RXRPC_JUMBO_SUBPKTLEN;
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 0fcc87f0409f..56e09d161a97 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -235,7 +235,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock);
spin_lock_init(&peer->lock);
- seqcount_init(&peer->mtu_lock);
peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
peer->recent_srtt_us = UINT_MAX;
peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
@@ -325,10 +324,10 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
hash_key = rxrpc_peer_hash_key(local, &peer->srx);
rxrpc_init_peer(local, peer, hash_key);
- spin_lock_bh(&rxnet->peer_hash_lock);
+ spin_lock(&rxnet->peer_hash_lock);
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
- spin_unlock_bh(&rxnet->peer_hash_lock);
+ spin_unlock(&rxnet->peer_hash_lock);
}
/*
diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
index 7ef93407be83..e848a4777b8c 100644
--- a/net/rxrpc/rxperf.c
+++ b/net/rxrpc/rxperf.c
@@ -478,6 +478,18 @@ static int rxperf_deliver_request(struct rxperf_call *call)
call->unmarshal++;
fallthrough;
case 2:
+ ret = rxperf_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ /* Deal with the terminal magic cookie. */
+ call->iov_len = 4;
+ call->kvec[0].iov_len = call->iov_len;
+ call->kvec[0].iov_base = call->tmp;
+ iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
+ call->unmarshal++;
+ fallthrough;
+ case 3:
ret = rxperf_extract_data(call, false);
if (ret < 0)
return ret;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index ca6984541edb..3e6cb35baf25 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -3337,10 +3337,7 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family)
* which need net ref.
*/
sk = smc->clcsock->sk;
- __netns_tracker_free(net, &sk->ns_tracker, false);
- sk->sk_net_refcnt = 1;
- get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sk);
return 0;
}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index cb279eb9ac4b..7ce5e28a6c03 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1674,12 +1674,14 @@ static void remove_cache_proc_entries(struct cache_detail *cd)
}
}
-#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
struct proc_dir_entry *p;
struct sunrpc_net *sn;
+ if (!IS_ENABLED(CONFIG_PROC_FS))
+ return 0;
+
sn = net_generic(net, sunrpc_net_id);
cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
if (cd->procfs == NULL)
@@ -1707,12 +1709,6 @@ out_nomem:
remove_cache_proc_entries(cd);
return -ENOMEM;
}
-#else /* CONFIG_PROC_FS */
-static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
-{
- return 0;
-}
-#endif
void __init cache_initialize(void)
{
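The #ifdef CONFIG_PROC_FS / stub-function pair collapses into one function with an early IS_ENABLED() return: both branches are now always parsed (so a procfs-less config can no longer bit-rot the code), while constant folding still deletes the body when the option is off. A simplified userspace illustration of the idiom (the kernel's IS_ENABLED() in kconfig.h is a token-pasting macro; a plain expansion is assumed here):

    #include <stdio.h>

    #define CONFIG_PROC_FS 1              /* pretend .config set it */
    #define IS_ENABLED(option) (option)   /* simplified stand-in */

    static int create_entries(void)
    {
        if (!IS_ENABLED(CONFIG_PROC_FS))
            return 0;    /* compiled, then removed as dead code */
        puts("creating /proc entries");
        return 0;
    }

    int main(void)
    {
        return create_entries();
    }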
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cef623ea1506..9b45fbdc90ca 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -864,8 +864,6 @@ void rpc_signal_task(struct rpc_task *task)
if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
return;
trace_rpc_task_signalled(task, task->tk_action);
- set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
- smp_mb__after_atomic();
queue = READ_ONCE(task->tk_waitqueue);
if (queue)
rpc_wake_up_queued_task(queue, task);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index cb3bd12f5818..72e5a01df3d3 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1541,10 +1541,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
newlen = error;
if (protocol == IPPROTO_TCP) {
- __netns_tracker_free(net, &sock->sk->ns_tracker, false);
- sock->sk->sk_net_refcnt = 1;
- get_net_track(net, &sock->sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sock->sk);
if ((error = kernel_listen(sock, 64)) < 0)
goto bummer;
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c60936d8cef7..83cc095846d3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1941,12 +1941,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
goto out;
}
- if (protocol == IPPROTO_TCP) {
- __netns_tracker_free(xprt->xprt_net, &sock->sk->ns_tracker, false);
- sock->sk->sk_net_refcnt = 1;
- get_net_track(xprt->xprt_net, &sock->sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(xprt->xprt_net, 1);
- }
+ if (protocol == IPPROTO_TCP)
+ sk_net_refcnt_upgrade(sock->sk);
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
if (IS_ERR(filp))
@@ -2581,7 +2577,15 @@ static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid)
struct sock_xprt *lower_transport =
container_of(lower_xprt, struct sock_xprt, xprt);
- lower_transport->xprt_err = status ? -EACCES : 0;
+ switch (status) {
+ case 0:
+ case -EACCES:
+ case -ETIMEDOUT:
+ lower_transport->xprt_err = status;
+ break;
+ default:
+ lower_transport->xprt_err = -EACCES;
+ }
complete(&lower_transport->handshake_done);
xprt_put(lower_xprt);
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 34945de1fb1f..f0e613d97664 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2102,6 +2102,7 @@ restart_locked:
goto out_sock_put;
}
+ sock_put(other);
goto lookup;
}
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 7c06ffd633d2..a5e730ffda57 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -180,7 +180,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
}
/*
- * Dump large security xattr values as a continuous ascii hexademical string.
+ * Dump large security xattr values as a continuous ascii hexadecimal string.
* (pr_debug is limited to 64 bytes.)
*/
static void dump_security_xattr_l(const char *prefix, const void *src,
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 377e57e9084f..0add782e73ba 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -169,7 +169,7 @@ static int is_unsupported_hmac_fs(struct dentry *dentry)
* and compare it against the stored security.evm xattr.
*
* For performance:
- * - use the previoulsy retrieved xattr value and length to calculate the
+ * - use the previously retrieved xattr value and length to calculate the
* HMAC.)
* - cache the verification result in the iint, when available.
*
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 24d09ea91b87..a4f284bd846c 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -149,6 +149,9 @@ struct ima_kexec_hdr {
#define IMA_CHECK_BLACKLIST 0x40000000
#define IMA_VERITY_REQUIRED 0x80000000
+/* Exclude non-action flags which are not rule-specific. */
+#define IMA_NONACTION_RULE_FLAGS (IMA_NONACTION_FLAGS & ~IMA_NEW_FILE)
+
#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
IMA_HASH | IMA_APPRAISE_SUBMASK)
#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 9f9897a7c217..28b8b0db6f9b 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -269,10 +269,13 @@ static int process_measurement(struct file *file, const struct cred *cred,
mutex_lock(&iint->mutex);
if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
- /* reset appraisal flags if ima_inode_post_setattr was called */
+ /*
+ * Reset appraisal flags (action and non-action rule-specific)
+ * if ima_inode_post_setattr was called.
+ */
iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
- IMA_NONACTION_FLAGS);
+ IMA_NONACTION_RULE_FLAGS);
/*
* Re-evaulate the file if either the xattr has changed or the
@@ -1011,9 +1014,9 @@ int process_buffer_measurement(struct mnt_idmap *idmap,
}
/*
- * Both LSM hooks and auxilary based buffer measurements are
- * based on policy. To avoid code duplication, differentiate
- * between the LSM hooks and auxilary buffer measurements,
+ * Both LSM hooks and auxiliary based buffer measurements are
+ * based on policy. To avoid code duplication, differentiate
+ * between the LSM hooks and auxiliary buffer measurements,
* retrieving the policy rule information only for the LSM hook
* buffer measurements.
*/
diff --git a/security/landlock/net.c b/security/landlock/net.c
index d5dcc4407a19..104b6c01fe50 100644
--- a/security/landlock/net.c
+++ b/security/landlock/net.c
@@ -63,8 +63,7 @@ static int current_check_access_socket(struct socket *const sock,
if (WARN_ON_ONCE(dom->num_layers < 1))
return -EACCES;
- /* Checks if it's a (potential) TCP socket. */
- if (sock->type != SOCK_STREAM)
+ if (!sk_is_tcp(sock->sk))
return 0;
/* Checks for minimal header length to safely read sa_family. */
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
index 241ce44375b6..bff4e40a3093 100644
--- a/security/landlock/ruleset.c
+++ b/security/landlock/ruleset.c
@@ -124,7 +124,7 @@ create_rule(const struct landlock_id id,
return ERR_PTR(-ENOMEM);
RB_CLEAR_NODE(&new_rule->node);
if (is_object_pointer(id.type)) {
- /* This should be catched by insert_rule(). */
+ /* This should have been caught by insert_rule(). */
WARN_ON_ONCE(!id.key.object);
landlock_get_object(id.key.object);
}
diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c
index d4ee5bb7c486..903578466905 100644
--- a/sound/pci/hda/cs35l56_hda_spi.c
+++ b/sound/pci/hda/cs35l56_hda_spi.c
@@ -22,6 +22,9 @@ static int cs35l56_hda_spi_probe(struct spi_device *spi)
return -ENOMEM;
cs35l56->base.dev = &spi->dev;
+ ret = cs35l56_init_config_for_spi(&cs35l56->base, spi);
+ if (ret)
+ return ret;
#ifdef CS35L56_WAKE_HOLD_TIME_US
cs35l56->base.can_hibernate = true;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 224616fbec4f..c735f630ecb5 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -10623,6 +10623,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
@@ -10656,7 +10657,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
- SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
index e0ed4fc11155..e28bfefa72f3 100644
--- a/sound/soc/codecs/cs35l56-shared.c
+++ b/sound/soc/codecs/cs35l56-shared.c
@@ -10,6 +10,7 @@
#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
#include <linux/types.h>
#include <sound/cs-amp-lib.h>
@@ -303,6 +304,79 @@ void cs35l56_wait_min_reset_pulse(void)
}
EXPORT_SYMBOL_NS_GPL(cs35l56_wait_min_reset_pulse, "SND_SOC_CS35L56_SHARED");
+static const struct {
+ u32 addr;
+ u32 value;
+} cs35l56_spi_system_reset_stages[] = {
+ { .addr = CS35L56_DSP_VIRTUAL1_MBOX_1, .value = CS35L56_MBOX_CMD_SYSTEM_RESET },
+ /* The next write is necessary to delimit the soft reset */
+ { .addr = CS35L56_DSP_MBOX_1_RAW, .value = CS35L56_MBOX_CMD_PING },
+};
+
+static void cs35l56_spi_issue_bus_locked_reset(struct cs35l56_base *cs35l56_base,
+ struct spi_device *spi)
+{
+ struct cs35l56_spi_payload *buf = cs35l56_base->spi_payload_buf;
+ struct spi_transfer t = {
+ .tx_buf = buf,
+ .len = sizeof(*buf),
+ };
+ struct spi_message m;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(cs35l56_spi_system_reset_stages); i++) {
+ buf->addr = cpu_to_be32(cs35l56_spi_system_reset_stages[i].addr);
+ buf->value = cpu_to_be32(cs35l56_spi_system_reset_stages[i].value);
+ spi_message_init_with_transfers(&m, &t, 1);
+ ret = spi_sync_locked(spi, &m);
+ if (ret)
+ dev_warn(cs35l56_base->dev, "spi_sync failed: %d\n", ret);
+
+ usleep_range(CS35L56_SPI_RESET_TO_PORT_READY_US,
+ 2 * CS35L56_SPI_RESET_TO_PORT_READY_US);
+ }
+}
+
+static void cs35l56_spi_system_reset(struct cs35l56_base *cs35l56_base)
+{
+ struct spi_device *spi = to_spi_device(cs35l56_base->dev);
+ unsigned int val;
+ int read_ret, ret;
+
+ /*
+ * There must not be any other SPI bus activity while the amp is
+ * soft-resetting.
+ */
+ ret = spi_bus_lock(spi->controller);
+ if (ret) {
+ dev_warn(cs35l56_base->dev, "spi_bus_lock failed: %d\n", ret);
+ return;
+ }
+
+ cs35l56_spi_issue_bus_locked_reset(cs35l56_base, spi);
+ spi_bus_unlock(spi->controller);
+
+ /*
+ * Check firmware boot by testing for a response in MBOX_2.
+ * HALO_STATE cannot be trusted yet because the reset sequence
+ * can leave it with stale state. But MBOX is reset.
+ * The regmap must remain in cache-only until the chip has
+ * booted, so use a bypassed read.
+ */
+ ret = read_poll_timeout(regmap_read_bypassed, read_ret,
+ (val > 0) && (val < 0xffffffff),
+ CS35L56_HALO_STATE_POLL_US,
+ CS35L56_HALO_STATE_TIMEOUT_US,
+ false,
+ cs35l56_base->regmap,
+ CS35L56_DSP_VIRTUAL1_MBOX_2,
+ &val);
+ if (ret) {
+ dev_err(cs35l56_base->dev, "SPI reboot timed out(%d): MBOX2=%#x\n",
+ read_ret, val);
+ }
+}
+
static const struct reg_sequence cs35l56_system_reset_seq[] = {
REG_SEQ0(CS35L56_DSP1_HALO_STATE, 0),
REG_SEQ0(CS35L56_DSP_VIRTUAL1_MBOX_1, CS35L56_MBOX_CMD_SYSTEM_RESET),
@@ -315,6 +389,12 @@ void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire)
* accesses other than the controlled system reset sequence below.
*/
regcache_cache_only(cs35l56_base->regmap, true);
+
+ if (cs35l56_is_spi(cs35l56_base)) {
+ cs35l56_spi_system_reset(cs35l56_base);
+ return;
+ }
+
regmap_multi_reg_write_bypassed(cs35l56_base->regmap,
cs35l56_system_reset_seq,
ARRAY_SIZE(cs35l56_system_reset_seq));
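For SPI the reset sequence is: lock the bus, write SYSTEM_RESET to the virtual mailbox, write a PING to the raw mailbox purely to delimit the reset on the wire, sleeping the padded CS35L56_SPI_RESET_TO_PORT_READY_US after each stage, then poll MBOX_2 with a bypassed read because regmap stays cache-only until boot completes. The delimiting PING, rendered as the 10 wire bytes it becomes (values from the hunks above; htonl stands in for cpu_to_be32):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cs35l56_spi_payload {
        uint32_t addr;
        uint16_t pad;
        uint32_t value;
    } __attribute__((packed));

    int main(void)
    {
        struct cs35l56_spi_payload p = {
            .addr  = htonl(0x0011000),    /* CS35L56_DSP_MBOX_1_RAW */
            .pad   = 0,
            .value = htonl(0x0A000000),   /* CS35L56_MBOX_CMD_PING */
        };
        const uint8_t *b = (const uint8_t *)&p;

        for (size_t i = 0; i < sizeof(p); i++)
            printf("%02X ", b[i]);
        printf("\n");   /* 00 01 10 00 00 00 0A 00 00 00 */
        return 0;
    }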
diff --git a/sound/soc/codecs/cs35l56-spi.c b/sound/soc/codecs/cs35l56-spi.c
index c101134e8532..ca6c03a8766d 100644
--- a/sound/soc/codecs/cs35l56-spi.c
+++ b/sound/soc/codecs/cs35l56-spi.c
@@ -33,6 +33,9 @@ static int cs35l56_spi_probe(struct spi_device *spi)
cs35l56->base.dev = &spi->dev;
cs35l56->base.can_hibernate = true;
+ ret = cs35l56_init_config_for_spi(&cs35l56->base, spi);
+ if (ret)
+ return ret;
ret = cs35l56_common_probe(cs35l56);
if (ret != 0)
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index f3c97da798dc..76159c45e6b5 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -233,7 +233,6 @@ static const struct snd_kcontrol_new es8328_right_line_controls =
/* Left Mixer */
static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
- SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0),
SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0),
SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0),
SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0),
@@ -243,7 +242,6 @@ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0),
SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0),
- SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0),
SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0),
};
@@ -336,10 +334,10 @@ static const struct snd_soc_dapm_widget es8328_dapm_widgets[] = {
SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8328_DACPOWER,
ES8328_DACPOWER_LDAC_OFF, 1),
- SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_MIXER("Left Mixer", ES8328_DACCONTROL17, 7, 0,
&es8328_left_mixer_controls[0],
ARRAY_SIZE(es8328_left_mixer_controls)),
- SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_MIXER("Right Mixer", ES8328_DACCONTROL20, 7, 0,
&es8328_right_mixer_controls[0],
ARRAY_SIZE(es8328_right_mixer_controls)),
@@ -418,19 +416,14 @@ static const struct snd_soc_dapm_route es8328_dapm_routes[] = {
{ "Right Line Mux", "PGA", "Right PGA Mux" },
{ "Right Line Mux", "Differential", "Differential Mux" },
- { "Left Out 1", NULL, "Left DAC" },
- { "Right Out 1", NULL, "Right DAC" },
- { "Left Out 2", NULL, "Left DAC" },
- { "Right Out 2", NULL, "Right DAC" },
-
- { "Left Mixer", "Playback Switch", "Left DAC" },
+ { "Left Mixer", NULL, "Left DAC" },
{ "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
{ "Left Mixer", "Right Playback Switch", "Right DAC" },
{ "Left Mixer", "Right Bypass Switch", "Right Line Mux" },
{ "Right Mixer", "Left Playback Switch", "Left DAC" },
{ "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
- { "Right Mixer", "Playback Switch", "Right DAC" },
+ { "Right Mixer", NULL, "Right DAC" },
{ "Right Mixer", "Right Bypass Switch", "Right Line Mux" },
{ "DAC DIG", NULL, "DAC STM" },
diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c
index d482cd194c08..58315eab492a 100644
--- a/sound/soc/codecs/tas2764.c
+++ b/sound/soc/codecs/tas2764.c
@@ -365,7 +365,7 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_component *component = dai->component;
struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
- u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0;
+ u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0, asi_cfg_4 = 0;
int ret;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -374,12 +374,14 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
fallthrough;
case SND_SOC_DAIFMT_NB_NF:
asi_cfg_1 = TAS2764_TDM_CFG1_RX_RISING;
+ asi_cfg_4 = TAS2764_TDM_CFG4_TX_FALLING;
break;
case SND_SOC_DAIFMT_IB_IF:
asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START;
fallthrough;
case SND_SOC_DAIFMT_IB_NF:
asi_cfg_1 = TAS2764_TDM_CFG1_RX_FALLING;
+ asi_cfg_4 = TAS2764_TDM_CFG4_TX_RISING;
break;
}
@@ -389,6 +391,12 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
if (ret < 0)
return ret;
+ ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG4,
+ TAS2764_TDM_CFG4_TX_MASK,
+ asi_cfg_4);
+ if (ret < 0)
+ return ret;
+
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START;
diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h
index 168af772a898..9490f2686e38 100644
--- a/sound/soc/codecs/tas2764.h
+++ b/sound/soc/codecs/tas2764.h
@@ -25,7 +25,7 @@
/* Power Control */
#define TAS2764_PWR_CTRL TAS2764_REG(0X0, 0x02)
-#define TAS2764_PWR_CTRL_MASK GENMASK(1, 0)
+#define TAS2764_PWR_CTRL_MASK GENMASK(2, 0)
#define TAS2764_PWR_CTRL_ACTIVE 0x0
#define TAS2764_PWR_CTRL_MUTE BIT(0)
#define TAS2764_PWR_CTRL_SHUTDOWN BIT(1)
@@ -79,6 +79,12 @@
#define TAS2764_TDM_CFG3_RXS_SHIFT 0x4
#define TAS2764_TDM_CFG3_MASK GENMASK(3, 0)
+/* TDM Configuration Reg4 */
+#define TAS2764_TDM_CFG4 TAS2764_REG(0X0, 0x0d)
+#define TAS2764_TDM_CFG4_TX_MASK BIT(0)
+#define TAS2764_TDM_CFG4_TX_RISING 0x0
+#define TAS2764_TDM_CFG4_TX_FALLING BIT(0)
+
/* TDM Configuration Reg5 */
#define TAS2764_TDM_CFG5 TAS2764_REG(0X0, 0x0e)
#define TAS2764_TDM_CFG5_VSNS_MASK BIT(6)
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index 9f93b230652a..863c3f672ba9 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -506,7 +506,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
}
static DECLARE_TLV_DB_SCALE(tas2770_digital_tlv, 1100, 50, 0);
-static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -12750, 50, 0);
+static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -10050, 50, 0);
static const struct snd_kcontrol_new tas2770_snd_controls[] = {
SOC_SINGLE_TLV("Speaker Playback Volume", TAS2770_PLAY_CFG_REG2,
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index c4eb87c5d39e..9f33dd11d47f 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -994,10 +994,10 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = {
{
.name = "sai-tx",
.playback = {
- .stream_name = "CPU-Playback",
+ .stream_name = "SAI-Playback",
.channels_min = 1,
.channels_max = 32,
- .rate_min = 8000,
+ .rate_min = 8000,
.rate_max = 2822400,
.rates = SNDRV_PCM_RATE_KNOT,
.formats = FSL_SAI_FORMATS,
@@ -1007,7 +1007,7 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = {
{
.name = "sai-rx",
.capture = {
- .stream_name = "CPU-Capture",
+ .stream_name = "SAI-Capture",
.channels_min = 1,
.channels_max = 32,
.rate_min = 8000,
diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
index 50ecc5f51100..dac5d4ddacd6 100644
--- a/sound/soc/fsl/imx-audmix.c
+++ b/sound/soc/fsl/imx-audmix.c
@@ -119,8 +119,8 @@ static const struct snd_soc_ops imx_audmix_be_ops = {
static const char *name[][3] = {
{"HiFi-AUDMIX-FE-0", "HiFi-AUDMIX-FE-1", "HiFi-AUDMIX-FE-2"},
{"sai-tx", "sai-tx", "sai-rx"},
- {"AUDMIX-Playback-0", "AUDMIX-Playback-1", "CPU-Capture"},
- {"CPU-Playback", "CPU-Playback", "AUDMIX-Capture-0"},
+ {"AUDMIX-Playback-0", "AUDMIX-Playback-1", "SAI-Capture"},
+ {"SAI-Playback", "SAI-Playback", "AUDMIX-Capture-0"},
};
static int imx_audmix_probe(struct platform_device *pdev)
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 203b07d4d833..c13064c77726 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -803,7 +803,9 @@ static int create_sdw_dailink(struct snd_soc_card *card,
int *be_id, struct snd_soc_codec_conf **codec_conf)
{
struct device *dev = card->dev;
+ struct snd_soc_acpi_mach *mach = dev_get_platdata(card->dev);
struct asoc_sdw_mc_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_acpi_mach_params *mach_params = &mach->mach_params;
struct intel_mc_ctx *intel_ctx = (struct intel_mc_ctx *)ctx->private;
struct asoc_sdw_endpoint *sof_end;
int stream;
@@ -900,6 +902,11 @@ static int create_sdw_dailink(struct snd_soc_card *card,
codecs[j].name = sof_end->codec_name;
codecs[j].dai_name = sof_end->dai_info->dai_name;
+ if (sof_end->dai_info->dai_type == SOC_SDW_DAI_TYPE_MIC &&
+ mach_params->dmic_num > 0) {
+ dev_warn(dev,
+ "Both SDW DMIC and PCH DMIC are present, if incorrect, please set kernel params snd_sof_intel_hda_generic dmic_num=0 to disable PCH DMIC\n");
+ }
j++;
}
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index be689f6e10c8..a1ccd95da8bb 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -1312,22 +1312,8 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
/* report to machine driver if any DMICs are found */
mach->mach_params.dmic_num = check_dmic_num(sdev);
- if (sdw_mach_found) {
- /*
- * DMICs use up to 4 pins and are typically pin-muxed with SoundWire
- * link 2 and 3, or link 1 and 2, thus we only try to enable dmics
- * if all conditions are true:
- * a) 2 or fewer links are used by SoundWire
- * b) the NHLT table reports the presence of microphones
- */
- if (hweight_long(mach->link_mask) <= 2)
- dmic_fixup = true;
- else
- mach->mach_params.dmic_num = 0;
- } else {
- if (mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER)
- dmic_fixup = true;
- }
+ if (sdw_mach_found || mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER)
+ dmic_fixup = true;
if (tplg_fixup &&
dmic_fixup &&
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 737dd00e97b1..779d97d31f17 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1145,7 +1145,7 @@ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
{
struct usbmidi_out_port *port = substream->runtime->private_data;
- cancel_work_sync(&port->ep->work);
+ flush_work(&port->ep->work);
return substream_open(substream, 0, 0);
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index a97efb7b131e..09210fb4ac60 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1868,6 +1868,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
subs->stream_offset_adj = 2;
break;
+ case USB_ID(0x2b73, 0x000a): /* Pioneer DJM-900NXS2 */
case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */
pioneer_djm_set_format_quirk(subs, 0x0082);
break;
diff --git a/tools/arch/arm64/tools/Makefile b/tools/arch/arm64/tools/Makefile
index 7b42feedf647..de4f1b66ef01 100644
--- a/tools/arch/arm64/tools/Makefile
+++ b/tools/arch/arm64/tools/Makefile
@@ -13,12 +13,6 @@ AWK ?= awk
MKDIR ?= mkdir
RM ?= rm
-ifeq ($(V),1)
-Q =
-else
-Q = @
-endif
-
arm64_tools_dir = $(top_srcdir)/arch/arm64/tools
arm64_sysreg_tbl = $(arm64_tools_dir)/sysreg
arm64_gen_sysreg = $(arm64_tools_dir)/gen-sysreg.awk
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 243b79f2b451..062bbd6cd048 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -27,12 +27,6 @@ srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
FEATURE_USER = .bpf
FEATURE_TESTS = libbfd disassembler-four-args disassembler-init-styled
FEATURE_DISPLAY = libbfd
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index 4315652678b9..bf843f328812 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -5,12 +5,6 @@ INSTALL ?= install
RM ?= rm -f
RMDIR ?= rmdir --ignore-fail-on-non-empty
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
prefix ?= /usr/local
mandir ?= $(prefix)/man
man8dir = $(mandir)/man8
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index dd9f3ec84201..6ea4823b770c 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -7,12 +7,6 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
BPF_DIR = $(srctree)/tools/lib/bpf
ifneq ($(OUTPUT),)
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index 4b8079f294f6..afbddea3a39c 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -5,10 +5,8 @@ include ../../scripts/Makefile.arch
srctree := $(abspath $(CURDIR)/../../../)
ifeq ($(V),1)
- Q =
msg =
else
- Q = @
ifeq ($(silent),1)
msg =
else
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
index c4f1f1735af6..e49203ebd48c 100644
--- a/tools/bpf/runqslower/Makefile
+++ b/tools/bpf/runqslower/Makefile
@@ -26,10 +26,7 @@ VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \
VMLINUX_BTF_PATH := $(or $(VMLINUX_BTF),$(firstword \
$(wildcard $(VMLINUX_BTF_PATHS))))
-ifeq ($(V),1)
-Q =
-else
-Q = @
+ifneq ($(V),1)
MAKEFLAGS += --no-print-directory
submake_extras := feature_display=0
endif
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 18ad131f6ea7..63ef21878761 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -17,13 +17,7 @@ $(call allow-override,LD,$(CROSS_COMPILE)ld)
export HOSTCC HOSTLD HOSTAR
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
-export Q srctree CC LD
+export srctree CC LD
MAKEFLAGS := --no-print-directory
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 857a5f7b413d..168140f8e646 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -53,13 +53,6 @@ include $(srctree)/tools/scripts/Makefile.include
# copy a bit from Linux kbuild
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
INCLUDES = -I$(or $(OUTPUT),.) \
-I$(srctree)/tools/include -I$(srctree)/tools/include/uapi \
-I$(srctree)/tools/arch/$(SRCARCH)/include
@@ -96,12 +89,6 @@ override CFLAGS += $(CLANG_CROSS_FLAGS)
# flags specific for shared library
SHLIB_FLAGS := -DSHARED -fPIC
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
# Disable command line variables (CFLAGS) override from top
# level Makefile (perf), otherwise build Makefile will get
# the same command line setup.
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile
index 3a9b2140aa04..e9a7ac2c062e 100644
--- a/tools/lib/perf/Makefile
+++ b/tools/lib/perf/Makefile
@@ -39,19 +39,6 @@ libdir = $(prefix)/$(libdir_relative)
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
TEST_ARGS := $(if $(V),-v)
# Set compile option CFLAGS
diff --git a/tools/lib/thermal/Makefile b/tools/lib/thermal/Makefile
index 8890fd57b110..a1f5e388644d 100644
--- a/tools/lib/thermal/Makefile
+++ b/tools/lib/thermal/Makefile
@@ -39,19 +39,6 @@ libdir = $(prefix)/$(libdir_relative)
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
# Set compile option CFLAGS
ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS)
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index f56e27727534..7a65948892e5 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -46,12 +46,6 @@ HOST_OVERRIDES := CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)"
AWK = awk
MKDIR = mkdir
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
BUILD_ORC := n
ifeq ($(SRCARCH),x86)
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index be18a0489303..ce973d9d8e6d 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2472,13 +2472,14 @@ static void mark_rodata(struct objtool_file *file)
*
* - .rodata: can contain GCC switch tables
* - .rodata.<func>: same, if -fdata-sections is being used
- * - .rodata..c_jump_table: contains C annotated jump tables
+ * - .data.rel.ro.c_jump_table: contains C annotated jump tables
*
* .rodata.str1.* sections are ignored; they don't contain jump tables.
*/
for_each_sec(file, sec) {
- if (!strncmp(sec->name, ".rodata", 7) &&
- !strstr(sec->name, ".str1.")) {
+ if ((!strncmp(sec->name, ".rodata", 7) &&
+ !strstr(sec->name, ".str1.")) ||
+ !strncmp(sec->name, ".data.rel.ro", 12)) {
sec->rodata = true;
found = true;
}
diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h
index e7ee7ffccefd..e049679bb17b 100644
--- a/tools/objtool/include/objtool/special.h
+++ b/tools/objtool/include/objtool/special.h
@@ -10,7 +10,7 @@
#include <objtool/check.h>
#include <objtool/elf.h>
-#define C_JUMP_TABLE_SECTION ".rodata..c_jump_table"
+#define C_JUMP_TABLE_SECTION ".data.rel.ro.c_jump_table"
struct special_alt {
struct list_head list;
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index b2174894f9f7..6bb7edda3094 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -19,7 +19,7 @@ NORETURN(__x64_sys_exit_group)
NORETURN(arch_cpu_idle_dead)
NORETURN(bch2_trans_in_restart_error)
NORETURN(bch2_trans_restart_error)
-NORETURN(bch2_trans_unlocked_error)
+NORETURN(bch2_trans_unlocked_or_in_restart_error)
NORETURN(cpu_bringup_and_idle)
NORETURN(cpu_startup_entry)
NORETURN(do_exit)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 55d6ce9ea52f..05c083bb1122 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -161,47 +161,6 @@ export VPATH
SOURCE := $(shell ln -sf $(srctree)/tools/perf $(OUTPUT)/source)
endif
-# Beautify output
-# ---------------------------------------------------------------------------
-#
-# Most of build commands in Kbuild start with "cmd_". You can optionally define
-# "quiet_cmd_*". If defined, the short log is printed. Otherwise, no log from
-# that command is printed by default.
-#
-# e.g.)
-# quiet_cmd_depmod = DEPMOD $(MODLIB)
-# cmd_depmod = $(srctree)/scripts/depmod.sh $(DEPMOD) $(KERNELRELEASE)
-#
-# A simple variant is to prefix commands with $(Q) - that's useful
-# for commands that shall be hidden in non-verbose mode.
-#
-# $(Q)$(MAKE) $(build)=scripts/basic
-#
-# To put more focus on warnings, be less verbose as default
-# Use 'make V=1' to see the full commands
-
-ifeq ($(V),1)
- quiet =
- Q =
-else
- quiet=quiet_
- Q=@
-endif
-
-# If the user is running make -s (silent mode), suppress echoing of commands
-# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
-ifeq ($(filter 3.%,$(MAKE_VERSION)),)
-short-opts := $(firstword -$(MAKEFLAGS))
-else
-short-opts := $(filter-out --%,$(MAKEFLAGS))
-endif
-
-ifneq ($(findstring s,$(short-opts)),)
- quiet=silent_
-endif
-
-export quiet Q
-
# Do not use make's built-in rules
# (this improves performance and avoids hard-to-debug behaviour);
MAKEFLAGS += -r
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 0aa4005017c7..45f4abef7064 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -136,6 +136,33 @@ else
NO_SUBDIR = :
endif
+# Beautify output
+# ---------------------------------------------------------------------------
+#
+# Most of the build commands in Kbuild start with "cmd_". You can optionally define
+# "quiet_cmd_*". If defined, the short log is printed. Otherwise, no log from
+# that command is printed by default.
+#
+# e.g.)
+# quiet_cmd_depmod = DEPMOD $(MODLIB)
+# cmd_depmod = $(srctree)/scripts/depmod.sh $(DEPMOD) $(KERNELRELEASE)
+#
+# A simple variant is to prefix commands with $(Q) - that's useful
+# for commands that shall be hidden in non-verbose mode.
+#
+# $(Q)$(MAKE) $(build)=scripts/basic
+#
+# To put more focus on warnings, be less verbose by default
+# Use 'make V=1' to see the full commands
+
+ifeq ($(V),1)
+ quiet =
+ Q =
+else
+ quiet = quiet_
+ Q = @
+endif
+
# If the user is running make -s (silent mode), suppress echoing of commands
# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
ifeq ($(filter 3.%,$(MAKE_VERSION)),)
@@ -146,8 +173,11 @@ endif
ifneq ($(findstring s,$(short-opts)),)
silent=1
+ quiet=silent_
endif
+export quiet Q
+
#
# Define a callable command for descending to a new directory
#
diff --git a/tools/sound/dapm-graph b/tools/sound/dapm-graph
index f14bdfedee8f..b6196ee5065a 100755
--- a/tools/sound/dapm-graph
+++ b/tools/sound/dapm-graph
@@ -10,7 +10,7 @@ set -eu
STYLE_COMPONENT_ON="color=dodgerblue;style=bold"
STYLE_COMPONENT_OFF="color=gray40;style=filled;fillcolor=gray90"
-STYLE_NODE_ON="shape=box,style=bold,color=green4"
+STYLE_NODE_ON="shape=box,style=bold,color=green4,fillcolor=white"
STYLE_NODE_OFF="shape=box,style=filled,color=gray30,fillcolor=gray95"
# Print usage and exit
diff --git a/tools/testing/selftests/bpf/Makefile.docs b/tools/testing/selftests/bpf/Makefile.docs
index eb6a4fea8c79..f7f9e7088bb3 100644
--- a/tools/testing/selftests/bpf/Makefile.docs
+++ b/tools/testing/selftests/bpf/Makefile.docs
@@ -7,12 +7,6 @@ INSTALL ?= install
RM ?= rm -f
RMDIR ?= rmdir --ignore-fail-on-non-empty
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
prefix ?= /usr/local
mandir ?= $(prefix)/man
man2dir = $(mandir)/man2
diff --git a/tools/testing/selftests/drivers/net/hds.py b/tools/testing/selftests/drivers/net/hds.py
index 394971b25c0b..873f5219e41d 100755
--- a/tools/testing/selftests/drivers/net/hds.py
+++ b/tools/testing/selftests/drivers/net/hds.py
@@ -2,17 +2,54 @@
# SPDX-License-Identifier: GPL-2.0
import errno
+import os
from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_raises, KsftSkipEx
-from lib.py import EthtoolFamily, NlError
+from lib.py import CmdExitFailure, EthtoolFamily, NlError
from lib.py import NetDrvEnv
+from lib.py import defer, ethtool, ip
-def get_hds(cfg, netnl) -> None:
+
+def _get_hds_mode(cfg, netnl) -> str:
try:
rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
except NlError as e:
raise KsftSkipEx('ring-get not supported by device')
if 'tcp-data-split' not in rings:
raise KsftSkipEx('tcp-data-split not supported by device')
+ return rings['tcp-data-split']
+
+
+def _xdp_onoff(cfg):
+ test_dir = os.path.dirname(os.path.realpath(__file__))
+ prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
+ ip("link set dev %s xdp obj %s sec xdp" %
+ (cfg.ifname, prog))
+ ip("link set dev %s xdp off" % cfg.ifname)
+
+
+def _ioctl_ringparam_modify(cfg, netnl) -> None:
+ """
+ Helper for performing a hopefully unimportant IOCTL SET.
+ IOCTL does not support HDS, so it should not affect the HDS config.
+ """
+ try:
+ rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
+ except NlError as e:
+ raise KsftSkipEx('ring-get not supported by device')
+
+ if 'tx' not in rings:
+ raise KsftSkipEx('setting Tx ring size not supported')
+
+ try:
+ ethtool(f"--disable-netlink -G {cfg.ifname} tx {rings['tx'] // 2}")
+ except CmdExitFailure as e:
+ ethtool(f"--disable-netlink -G {cfg.ifname} tx {rings['tx'] * 2}")
+ defer(ethtool, f"-G {cfg.ifname} tx {rings['tx']}")
+
+
+def get_hds(cfg, netnl) -> None:
+ _get_hds_mode(cfg, netnl)
+
def get_hds_thresh(cfg, netnl) -> None:
try:
@@ -104,6 +141,74 @@ def set_hds_thresh_gt(cfg, netnl) -> None:
netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'hds-thresh': hds_gt})
ksft_eq(e.exception.nl_msg.error, -errno.EINVAL)
+
+def set_xdp(cfg, netnl) -> None:
+ """
+ Enable single-buffer XDP on the device.
+ When HDS is in "auto" / UNKNOWN mode, XDP installation should work.
+ """
+ mode = _get_hds_mode(cfg, netnl)
+ if mode == 'enabled':
+ netnl.rings_set({'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'unknown'})
+
+ _xdp_onoff(cfg)
+
+
+def enabled_set_xdp(cfg, netnl) -> None:
+ """
+ Enable single-buffer XDP on the device.
+ When HDS is in "enabled" mode, XDP installation should not work.
+ """
+ _get_hds_mode(cfg, netnl) # Trigger skip if not supported
+
+ netnl.rings_set({'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'enabled'})
+ defer(netnl.rings_set, {'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'unknown'})
+
+ with ksft_raises(CmdExitFailure) as e:
+ _xdp_onoff(cfg)
+
+
+def ioctl(cfg, netnl) -> None:
+ mode1 = _get_hds_mode(cfg, netnl)
+ _ioctl_ringparam_modify(cfg, netnl)
+ mode2 = _get_hds_mode(cfg, netnl)
+
+ ksft_eq(mode1, mode2)
+
+
+def ioctl_set_xdp(cfg, netnl) -> None:
+ """
+ Like set_xdp(), but we perturb the settings via the legacy ioctl.
+ """
+ mode = _get_hds_mode(cfg, netnl)
+ if mode == 'enabled':
+ netnl.rings_set({'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'unknown'})
+
+ _ioctl_ringparam_modify(cfg, netnl)
+
+ _xdp_onoff(cfg)
+
+
+def ioctl_enabled_set_xdp(cfg, netnl) -> None:
+ """
+ Enable single-buffer XDP on the device.
+ When HDS is in "enabled" mode, XDP installation should not work.
+ """
+ _get_hds_mode(cfg, netnl) # Trigger skip if not supported
+
+ netnl.rings_set({'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'enabled'})
+ defer(netnl.rings_set, {'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'unknown'})
+
+ with ksft_raises(CmdExitFailure) as e:
+ _xdp_onoff(cfg)
+
+
def main() -> None:
with NetDrvEnv(__file__, queue_count=3) as cfg:
ksft_run([get_hds,
@@ -112,7 +217,12 @@ def main() -> None:
set_hds_enable,
set_hds_thresh_zero,
set_hds_thresh_max,
- set_hds_thresh_gt],
+ set_hds_thresh_gt,
+ set_xdp,
+ enabled_set_xdp,
+ ioctl,
+ ioctl_set_xdp,
+ ioctl_enabled_set_xdp],
args=(cfg, EthtoolFamily()))
ksft_exit()
diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py
index 38303da957ee..8a518905a9f9 100755
--- a/tools/testing/selftests/drivers/net/queues.py
+++ b/tools/testing/selftests/drivers/net/queues.py
@@ -45,10 +45,9 @@ def addremove_queues(cfg, nl) -> None:
netnl = EthtoolFamily()
channels = netnl.channels_get({'header': {'dev-index': cfg.ifindex}})
- if channels['combined-count'] == 0:
- rx_type = 'rx'
- else:
- rx_type = 'combined'
+ rx_type = 'rx'
+ if channels.get('combined-count', 0) > 0:
+ rx_type = 'combined'
expected = curr_queues - 1
cmd(f"ethtool -L {cfg.dev['ifname']} {rx_type} {expected}", timeout=10)
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
index 449f9d8be746..73f6c6fcecab 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
@@ -10,12 +10,16 @@ PLACE=$FUNCTION_FORK
PLACE2="kmem_cache_free"
PLACE3="schedule_timeout"
+# Some functions may have BPF programs attached; therefore, count the
+# functions already listed in enabled_functions before the tests start
+ocnt=`cat enabled_functions | wc -l`
+
echo "f:myevent1 $PLACE" >> dynamic_events
# Make sure the event is attached and is the only one
grep -q $PLACE enabled_functions
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 1 ]; then
+if [ $cnt -ne $((ocnt + 1)) ]; then
exit_fail
fi
@@ -23,7 +27,7 @@ echo "f:myevent2 $PLACE%return" >> dynamic_events
# It should still be the only attached function
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 1 ]; then
+if [ $cnt -ne $((ocnt + 1)) ]; then
exit_fail
fi
@@ -32,7 +36,7 @@ echo "f:myevent3 $PLACE2" >> dynamic_events
grep -q $PLACE2 enabled_functions
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 2 ]; then
+if [ $cnt -ne $((ocnt + 2)) ]; then
exit_fail
fi
@@ -49,7 +53,7 @@ grep -q myevent1 dynamic_events
# should still have 2 left
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 2 ]; then
+if [ $cnt -ne $((ocnt + 2)) ]; then
exit_fail
fi
@@ -57,7 +61,7 @@ echo > dynamic_events
# Should have none left
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 0 ]; then
+if [ $cnt -ne $ocnt ]; then
exit_fail
fi
@@ -65,7 +69,7 @@ echo "f:myevent4 $PLACE" >> dynamic_events
# Should only have one enabled
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 1 ]; then
+if [ $cnt -ne $((ocnt + 1)) ]; then
exit_fail
fi
@@ -73,7 +77,7 @@ echo > dynamic_events
# Should have none left
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 0 ]; then
+if [ $cnt -ne $ocnt ]; then
exit_fail
fi
diff --git a/tools/testing/selftests/hid/Makefile b/tools/testing/selftests/hid/Makefile
index 0336353bd15f..2839d2612ce3 100644
--- a/tools/testing/selftests/hid/Makefile
+++ b/tools/testing/selftests/hid/Makefile
@@ -43,10 +43,8 @@ TEST_GEN_PROGS = hid_bpf hidraw
# $3 - target (assumed to be file); only file name will be emitted;
# $4 - optional extra arg, emitted as-is, if provided.
ifeq ($(V),1)
-Q =
msg =
else
-Q = @
msg = @printf ' %-8s%s %s%s\n' "$(1)" "$(if $(2), [$(2)])" "$(notdir $(3))" "$(if $(4), $(4))";
MAKEFLAGS += --no-print-directory
submake_extras := feature_display=0
diff --git a/tools/testing/selftests/landlock/.gitignore b/tools/testing/selftests/landlock/.gitignore
index 470203a7cd73..335b2b1a3463 100644
--- a/tools/testing/selftests/landlock/.gitignore
+++ b/tools/testing/selftests/landlock/.gitignore
@@ -1,2 +1,4 @@
/*_test
+/sandbox-and-launch
/true
+/wait-pipe
diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
index a604ea5d8297..6064c9ac0532 100644
--- a/tools/testing/selftests/landlock/common.h
+++ b/tools/testing/selftests/landlock/common.h
@@ -207,6 +207,7 @@ enforce_ruleset(struct __test_metadata *const _metadata, const int ruleset_fd)
struct protocol_variant {
int domain;
int type;
+ int protocol;
};
struct service_fixture {
diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config
index 29af19c4e9f9..425de4c20271 100644
--- a/tools/testing/selftests/landlock/config
+++ b/tools/testing/selftests/landlock/config
@@ -1,8 +1,11 @@
+CONFIG_AF_UNIX_OOB=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_INET=y
CONFIG_IPV6=y
CONFIG_KEYS=y
+CONFIG_MPTCP=y
+CONFIG_MPTCP_IPV6=y
CONFIG_NET=y
CONFIG_NET_NS=y
CONFIG_OVERLAY_FS=y
diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
index 4e0aeb53b225..d9de0ee49ebc 100644
--- a/tools/testing/selftests/landlock/net_test.c
+++ b/tools/testing/selftests/landlock/net_test.c
@@ -85,18 +85,18 @@ static void setup_loopback(struct __test_metadata *const _metadata)
clear_ambient_cap(_metadata, CAP_NET_ADMIN);
}
+static bool prot_is_tcp(const struct protocol_variant *const prot)
+{
+ return (prot->domain == AF_INET || prot->domain == AF_INET6) &&
+ prot->type == SOCK_STREAM &&
+ (prot->protocol == IPPROTO_TCP || prot->protocol == IPPROTO_IP);
+}
+
static bool is_restricted(const struct protocol_variant *const prot,
const enum sandbox_type sandbox)
{
- switch (prot->domain) {
- case AF_INET:
- case AF_INET6:
- switch (prot->type) {
- case SOCK_STREAM:
- return sandbox == TCP_SANDBOX;
- }
- break;
- }
+ if (sandbox == TCP_SANDBOX)
+ return prot_is_tcp(prot);
return false;
}
@@ -105,7 +105,7 @@ static int socket_variant(const struct service_fixture *const srv)
int ret;
ret = socket(srv->protocol.domain, srv->protocol.type | SOCK_CLOEXEC,
- 0);
+ srv->protocol.protocol);
if (ret < 0)
return -errno;
return ret;
@@ -290,22 +290,70 @@ FIXTURE_TEARDOWN(protocol)
}
/* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp) {
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp1) {
/* clang-format on */
.sandbox = NO_SANDBOX,
.prot = {
.domain = AF_INET,
.type = SOCK_STREAM,
+ /* IPPROTO_IP == 0 */
+ .protocol = IPPROTO_IP,
},
};
/* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp) {
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp2) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_TCP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_mptcp) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_MPTCP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp1) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ /* IPPROTO_IP == 0 */
+ .protocol = IPPROTO_IP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp2) {
/* clang-format on */
.sandbox = NO_SANDBOX,
.prot = {
.domain = AF_INET6,
.type = SOCK_STREAM,
+ .protocol = IPPROTO_TCP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_mptcp) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_MPTCP,
},
};
@@ -350,22 +398,70 @@ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_datagram) {
};
/* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp) {
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp1) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ /* IPPROTO_IP == 0 */
+ .protocol = IPPROTO_IP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp2) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_TCP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_mptcp) {
/* clang-format on */
.sandbox = TCP_SANDBOX,
.prot = {
.domain = AF_INET,
.type = SOCK_STREAM,
+ .protocol = IPPROTO_MPTCP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp1) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ /* IPPROTO_IP == 0 */
+ .protocol = IPPROTO_IP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp2) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_TCP,
},
};
/* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp) {
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_mptcp) {
/* clang-format on */
.sandbox = TCP_SANDBOX,
.prot = {
.domain = AF_INET6,
.type = SOCK_STREAM,
+ .protocol = IPPROTO_MPTCP,
},
};
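The prot_is_tcp() helper added above treats IPPROTO_IP (0) and IPPROTO_TCP as equivalent for stream sockets because protocol 0 asks the kernel for the type's default protocol, which is TCP for SOCK_STREAM. A small standalone check (Python; SO_PROTOCOL is Linux-specific, and the numeric fallback of 38 is an assumption for common ABIs):

    import socket

    SO_PROTOCOL = getattr(socket, "SO_PROTOCOL", 38)  # 38 on most Linux ABIs
    a = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
    b = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    # Both report IPPROTO_TCP (6): protocol 0 resolved to the type's default.
    print(a.getsockopt(socket.SOL_SOCKET, SO_PROTOCOL))  # -> 6
    print(b.getsockopt(socket.SOL_SOCKET, SO_PROTOCOL))  # -> 6
    a.close()
    b.close()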
diff --git a/tools/testing/selftests/net/lib/Makefile b/tools/testing/selftests/net/lib/Makefile
index bc6b6762baf3..c22623b9a2a5 100644
--- a/tools/testing/selftests/net/lib/Makefile
+++ b/tools/testing/selftests/net/lib/Makefile
@@ -9,7 +9,10 @@ TEST_FILES := ../../../../../Documentation/netlink/specs
TEST_FILES += ../../../../net/ynl
TEST_GEN_FILES += csum
+TEST_GEN_FILES += $(patsubst %.c,%.o,$(wildcard *.bpf.c))
TEST_INCLUDES := $(wildcard py/*.py sh/*.sh)
include ../../lib.mk
+
+include ../bpf.mk
diff --git a/tools/testing/selftests/net/lib/xdp_dummy.bpf.c b/tools/testing/selftests/net/lib/xdp_dummy.bpf.c
new file mode 100644
index 000000000000..d988b2e0cee8
--- /dev/null
+++ b/tools/testing/selftests/net/lib/xdp_dummy.bpf.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define KBUILD_MODNAME "xdp_dummy"
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("xdp")
+int xdp_dummy_prog(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/rseq/rseq-riscv-bits.h b/tools/testing/selftests/rseq/rseq-riscv-bits.h
index de31a0143139..f02f411d550d 100644
--- a/tools/testing/selftests/rseq/rseq-riscv-bits.h
+++ b/tools/testing/selftests/rseq/rseq-riscv-bits.h
@@ -243,7 +243,7 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
#ifdef RSEQ_COMPARE_TWICE
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
#endif
- RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, 3)
+ RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, 3)
RSEQ_INJECT_ASM(4)
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
@@ -251,8 +251,8 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
[current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
[rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[ptr] "r" (ptr),
- [off] "er" (off),
- [inc] "er" (inc)
+ [off] "r" (off),
+ [inc] "r" (inc)
RSEQ_INJECT_INPUT
: "memory", RSEQ_ASM_TMP_REG_1
RSEQ_INJECT_CLOBBER
diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h
index 37e598d0a365..67d544aaa9a3 100644
--- a/tools/testing/selftests/rseq/rseq-riscv.h
+++ b/tools/testing/selftests/rseq/rseq-riscv.h
@@ -158,7 +158,7 @@ do { \
"bnez " RSEQ_ASM_TMP_REG_1 ", 222b\n" \
"333:\n"
-#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, post_commit_label) \
+#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, post_commit_label) \
"mv " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(ptr) "]\n" \
RSEQ_ASM_OP_R_ADD(off) \
REG_L RSEQ_ASM_TMP_REG_1 ", 0(" RSEQ_ASM_TMP_REG_1 ")\n" \
diff --git a/tools/thermal/lib/Makefile b/tools/thermal/lib/Makefile
index f2552f73a64c..056d212f25cf 100644
--- a/tools/thermal/lib/Makefile
+++ b/tools/thermal/lib/Makefile
@@ -39,19 +39,6 @@ libdir = $(prefix)/$(libdir_relative)
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
# Set compile option CFLAGS
ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS)
diff --git a/tools/tracing/latency/Makefile b/tools/tracing/latency/Makefile
index 6518b03e05c7..257a56b1899f 100644
--- a/tools/tracing/latency/Makefile
+++ b/tools/tracing/latency/Makefile
@@ -37,12 +37,6 @@ FEATURE_TESTS += libtracefs
FEATURE_DISPLAY := libtraceevent
FEATURE_DISPLAY += libtracefs
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
all: $(LATENCY-COLLECTOR)
include $(srctree)/tools/build/Makefile.include
diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile
index 8b5101457c70..0b61208db604 100644
--- a/tools/tracing/rtla/Makefile
+++ b/tools/tracing/rtla/Makefile
@@ -37,12 +37,6 @@ FEATURE_DISPLAY := libtraceevent
FEATURE_DISPLAY += libtracefs
FEATURE_DISPLAY += libcpupower
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
all: $(RTLA)
include $(srctree)/tools/build/Makefile.include
diff --git a/tools/verification/rv/Makefile b/tools/verification/rv/Makefile
index 411d62b3d8eb..5b898360ba48 100644
--- a/tools/verification/rv/Makefile
+++ b/tools/verification/rv/Makefile
@@ -35,12 +35,6 @@ FEATURE_TESTS += libtracefs
FEATURE_DISPLAY := libtraceevent
FEATURE_DISPLAY += libtracefs
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
all: $(RV)
include $(srctree)/tools/build/Makefile.include