291 files changed, 3691 insertions, 1369 deletions
diff --git a/.clang-format b/.clang-format
index e6080f5834a3..bc2ffb2a0b53 100644
--- a/.clang-format
+++ b/.clang-format
@@ -72,6 +72,10 @@ ForEachMacros:
   - 'apei_estatus_for_each_section'
   - 'ata_for_each_dev'
   - 'ata_for_each_link'
+  - '__ata_qc_for_each'
+  - 'ata_qc_for_each'
+  - 'ata_qc_for_each_raw'
+  - 'ata_qc_for_each_with_internal'
   - 'ax25_for_each'
   - 'ax25_uid_for_each'
   - 'bio_for_each_integrity_vec'
@@ -85,6 +89,7 @@ ForEachMacros:
   - 'blk_queue_for_each_rl'
   - 'bond_for_each_slave'
   - 'bond_for_each_slave_rcu'
+  - 'bpf_for_each_spilled_reg'
   - 'btree_for_each_safe128'
   - 'btree_for_each_safe32'
   - 'btree_for_each_safe64'
@@ -103,6 +108,8 @@ ForEachMacros:
   - 'drm_atomic_crtc_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane_state'
+  - 'drm_atomic_for_each_plane_damage'
+  - 'drm_connector_for_each_possible_encoder'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
   - 'drm_for_each_encoder'
@@ -121,11 +128,21 @@ ForEachMacros:
   - 'for_each_bio'
   - 'for_each_board_func_rsrc'
   - 'for_each_bvec'
+  - 'for_each_card_components'
+  - 'for_each_card_links'
+  - 'for_each_card_links_safe'
+  - 'for_each_card_prelinks'
+  - 'for_each_card_rtds'
+  - 'for_each_card_rtds_safe'
+  - 'for_each_cgroup_storage_type'
   - 'for_each_child_of_node'
   - 'for_each_clear_bit'
   - 'for_each_clear_bit_from'
   - 'for_each_cmsghdr'
   - 'for_each_compatible_node'
+  - 'for_each_component_dais'
+  - 'for_each_component_dais_safe'
+  - 'for_each_comp_order'
   - 'for_each_console'
   - 'for_each_cpu'
   - 'for_each_cpu_and'
@@ -133,6 +150,10 @@ ForEachMacros:
   - 'for_each_cpu_wrap'
   - 'for_each_dev_addr'
   - 'for_each_dma_cap_mask'
+  - 'for_each_dpcm_be'
+  - 'for_each_dpcm_be_rollback'
+  - 'for_each_dpcm_be_safe'
+  - 'for_each_dpcm_fe'
   - 'for_each_drhd_unit'
   - 'for_each_dss_dev'
   - 'for_each_efi_memory_desc'
@@ -149,6 +170,7 @@ ForEachMacros:
   - 'for_each_iommu'
   - 'for_each_ip_tunnel_rcu'
   - 'for_each_irq_nr'
+  - 'for_each_link_codecs'
   - 'for_each_lru'
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
@@ -160,6 +182,7 @@ ForEachMacros:
   - 'for_each_mem_range_rev'
   - 'for_each_migratetype_order'
   - 'for_each_msi_entry'
+  - 'for_each_msi_entry_safe'
   - 'for_each_net'
   - 'for_each_netdev'
   - 'for_each_netdev_continue'
@@ -183,12 +206,14 @@ ForEachMacros:
   - 'for_each_node_with_property'
   - 'for_each_of_allnodes'
   - 'for_each_of_allnodes_from'
+  - 'for_each_of_cpu_node'
   - 'for_each_of_pci_range'
   - 'for_each_old_connector_in_state'
   - 'for_each_old_crtc_in_state'
   - 'for_each_oldnew_connector_in_state'
   - 'for_each_oldnew_crtc_in_state'
   - 'for_each_oldnew_plane_in_state'
+  - 'for_each_oldnew_plane_in_state_reverse'
   - 'for_each_oldnew_private_obj_in_state'
   - 'for_each_old_plane_in_state'
   - 'for_each_old_private_obj_in_state'
@@ -206,14 +231,17 @@ ForEachMacros:
   - 'for_each_process'
   - 'for_each_process_thread'
   - 'for_each_property_of_node'
+  - 'for_each_registered_fb'
   - 'for_each_reserved_mem_region'
-  - 'for_each_resv_unavail_range'
+  - 'for_each_rtd_codec_dai'
+  - 'for_each_rtd_codec_dai_rollback'
   - 'for_each_rtdcom'
   - 'for_each_rtdcom_safe'
   - 'for_each_set_bit'
   - 'for_each_set_bit_from'
   - 'for_each_sg'
   - 'for_each_sg_page'
+  - 'for_each_sibling_event'
   - '__for_each_thread'
   - 'for_each_thread'
   - 'for_each_zone'
@@ -251,6 +279,8 @@ ForEachMacros:
   - 'hlist_nulls_for_each_entry_from'
   - 'hlist_nulls_for_each_entry_rcu'
   - 'hlist_nulls_for_each_entry_safe'
+  - 'i3c_bus_for_each_i2cdev'
+  - 'i3c_bus_for_each_i3cdev'
   - 'ide_host_for_each_port'
   - 'ide_port_for_each_dev'
   - 'ide_port_for_each_present_dev'
@@ -267,11 +297,14 @@ ForEachMacros:
   - 'kvm_for_each_memslot'
   - 'kvm_for_each_vcpu'
   - 'list_for_each'
+  - 'list_for_each_codec'
+  - 'list_for_each_codec_safe'
   - 'list_for_each_entry'
   - 'list_for_each_entry_continue'
   - 'list_for_each_entry_continue_rcu'
   - 'list_for_each_entry_continue_reverse'
   - 'list_for_each_entry_from'
+  - 'list_for_each_entry_from_rcu'
   - 'list_for_each_entry_from_reverse'
   - 'list_for_each_entry_lockless'
   - 'list_for_each_entry_rcu'
@@ -291,6 +324,7 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
+  - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
   - 'netdev_for_each_lower_private_rcu'
@@ -357,12 +391,14 @@ ForEachMacros:
   - 'sk_nulls_for_each'
   - 'sk_nulls_for_each_from'
   - 'sk_nulls_for_each_rcu'
+  - 'snd_array_for_each'
   - 'snd_pcm_group_for_each_entry'
   - 'snd_soc_dapm_widget_for_each_path'
   - 'snd_soc_dapm_widget_for_each_path_safe'
   - 'snd_soc_dapm_widget_for_each_sink_path'
   - 'snd_soc_dapm_widget_for_each_source_path'
   - 'tb_property_for_each'
+  - 'tcf_exts_for_each_action'
   - 'udp_portaddr_for_each_entry'
   - 'udp_portaddr_for_each_entry_rcu'
   - 'usb_hub_for_each_child'
@@ -371,6 +407,11 @@ ForEachMacros:
   - 'v4l2_m2m_for_each_dst_buf_safe'
   - 'v4l2_m2m_for_each_src_buf'
   - 'v4l2_m2m_for_each_src_buf_safe'
+  - 'virtio_device_for_each_vq'
+  - 'xa_for_each'
+  - 'xas_for_each'
+  - 'xas_for_each_conflict'
+  - 'xas_for_each_marked'
   - 'zorro_for_each_dev'
 
 #IncludeBlocks: Preserve # Unknown to clang-format-5.0
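[Editor's note] The .clang-format hunk above only adds names to the ForEachMacros list, but the reason matters: without an entry, clang-format parses a foreach-style macro as a function call and mangles the brace placement. A minimal standalone sketch (the macro and types here are invented, not from the kernel):

/*
 * Why ForEachMacros entries exist: a registered macro is formatted
 * like a `for` statement. `my_list_for_each` is hypothetical.
 */
#include <stdio.h>

struct node { int value; struct node *next; };

#define my_list_for_each(pos, head) \
    for ((pos) = (head); (pos) != NULL; (pos) = (pos)->next)

int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct node *pos;

    /* with 'my_list_for_each' registered, clang-format keeps this
     * loop-style brace instead of wrapping it like a call */
    my_list_for_each(pos, &a) {
        printf("%d\n", pos->value);
    }
    return 0;
}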
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
index 84262cdb8d29..96fa46cb133c 100644
--- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -235,4 +235,4 @@ cpus {
 ===========================================
 
 [1] ARM Linux Kernel documentation - CPUs bindings
-    Documentation/devicetree/bindings/arm/cpus.txt
+    Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index 8f0937db55c5..45730ba60af5 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -684,7 +684,7 @@ cpus {
 ===========================================
 
 [1] ARM Linux Kernel documentation - CPUs bindings
-    Documentation/devicetree/bindings/arm/cpus.txt
+    Documentation/devicetree/bindings/arm/cpus.yaml
 
 [2] ARM Linux Kernel documentation - PSCI bindings
     Documentation/devicetree/bindings/arm/psci.txt
diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt
index 1b2ab1ff5587..46652bf65147 100644
--- a/Documentation/devicetree/bindings/arm/sp810.txt
+++ b/Documentation/devicetree/bindings/arm/sp810.txt
@@ -4,7 +4,7 @@ SP810 System Controller
 
 Required properties:
 
 - compatible:	standard compatible string for a Primecell peripheral,
-		see Documentation/devicetree/bindings/arm/primecell.txt
+		see Documentation/devicetree/bindings/arm/primecell.yaml
 		for more details
 		should be: "arm,sp810", "arm,primecell"
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt
index de9eb0486630..b0d80c0fb265 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/arm/topology.txt
@@ -472,4 +472,4 @@ cpus {
 ===============================================================================
 [1] ARM Linux kernel documentation
-    Documentation/devicetree/bindings/arm/cpus.txt
+    Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
index af376a01f2b7..23b52dc02266 100644
--- a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
@@ -18,4 +18,4 @@ Required Properties:
 Each clock is assigned an identifier and client nodes use this identifier
 to specify the clock which they consume.
 
-All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>.
+All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.
diff --git a/Documentation/devicetree/bindings/display/arm,pl11x.txt b/Documentation/devicetree/bindings/display/arm,pl11x.txt
index ef89ab46b2c9..572fa2773ec4 100644
--- a/Documentation/devicetree/bindings/display/arm,pl11x.txt
+++ b/Documentation/devicetree/bindings/display/arm,pl11x.txt
@@ -1,6 +1,6 @@
 * ARM PrimeCell Color LCD Controller PL110/PL111
 
-See also Documentation/devicetree/bindings/arm/primecell.txt
+See also Documentation/devicetree/bindings/arm/primecell.yaml
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 38ca2201e8ae..2e097b57f170 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -14,8 +14,6 @@ Required properties:
     "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
     SoCs (either from AP or CP), see
-    Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
-    and
     Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
     for specific details about the offset property.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index b83bb8249074..a3be5298a5eb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -78,7 +78,7 @@ Sub-nodes:
 PPI affinity can be expressed as a single "ppi-partitions" node,
 containing a set of sub-nodes, each with the following property:
 - affinity: Should be a list of phandles to CPU nodes (as described in
-Documentation/devicetree/bindings/arm/cpus.txt).
+  Documentation/devicetree/bindings/arm/cpus.yaml).
 
 GICv3 has one or more Interrupt Translation Services (ITS) that are
 used to route Message Signalled Interrupts (MSI) to the CPUs.
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
index 0b8cc533ca83..cf759e5f9b10 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
@@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function
 = EXAMPLE
 The following example represents the GLINK RPM node on a MSM8996 device, with
 the function for the "rpm_request" channel defined, which is used for
-regualtors and root clocks.
+regulators and root clocks.
 
 	apcs_glb: mailbox@9820000 {
 		compatible = "qcom,msm8996-apcs-hmss-global";
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
index a35af2dafdad..49e1d72d3648 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
@@ -41,12 +41,12 @@ processor ID) and a string identifier.
 - qcom,local-pid:
 	Usage: required
 	Value type: <u32>
-	Definition: specifies the identfier of the local endpoint of this edge
+	Definition: specifies the identifier of the local endpoint of this edge
 
 - qcom,remote-pid:
 	Usage: required
 	Value type: <u32>
-	Definition: specifies the identfier of the remote endpoint of this edge
+	Definition: specifies the identifier of the remote endpoint of this edge
 
 = SUBNODES
 Each SMP2P pair contain a set of inbound and outbound entries, these are
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 62af30511a95..60a5ec04e8f0 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -163,6 +163,14 @@ C. Boot options
 	be preserved until there actually is some text is output to the console.
 	This option causes fbcon to bind immediately to the fbdev device.
 
+7. fbcon=logo-pos:<location>
+
+	The only possible 'location' is 'center' (without quotes), and when
+	given, the bootup logo is moved from the default top-left corner
+	location to the center of the framebuffer. If more than one logo is
+	displayed due to multiple CPUs, the collected line of logos is moved
+	as a whole.
+
 C. Attaching, Detaching and Unloading
 
 Before going on to how to attach, detach and unload the framebuffer console, an
diff --git a/MAINTAINERS b/MAINTAINERS
index 51029a425dbe..95be8f0779a3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3052,8 +3052,8 @@ F:	include/linux/bcm963xx_nvram.h
 F:	include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Rasesh Mody <rmody@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2.*
@@ -3072,9 +3072,9 @@ S:	Supported
 F:	drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M:	Ariel Elior <ariel.elior@cavium.com>
-M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M:	everest-linux-l2@cavium.com
+M:	Ariel Elior <aelior@marvell.com>
+M:	Sudarsana Kalluru <skalluru@marvell.com>
+M:	GR-everest-linux-l2@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
@@ -3249,9 +3249,9 @@ S:	Supported
 F:	drivers/scsi/bfa/
 
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Rasesh Mody <rmody@marvell.com>
+M:	Sudarsana Kalluru <skalluru@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/brocade/bna/
@@ -10688,9 +10688,9 @@ S:	Maintained
 F:	drivers/net/netdevsim/*
 
 NETXEN (1/10) GbE SUPPORT
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Rahul Verma <rahul.verma@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Manish Chopra <manishc@marvell.com>
+M:	Rahul Verma <rahulv@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/netxen/
@@ -12474,8 +12474,8 @@ S:	Supported
 F:	drivers/scsi/qedi/
 
 QLOGIC QL4xxx ETHERNET DRIVER
-M:	Ariel Elior <Ariel.Elior@cavium.com>
-M:	everest-linux-l2@cavium.com
+M:	Ariel Elior <aelior@marvell.com>
+M:	GR-everest-linux-l2@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qed/
@@ -12483,8 +12483,8 @@ F:	include/linux/qed/
 F:	drivers/net/ethernet/qlogic/qede/
 
 QLOGIC QL4xxx RDMA DRIVER
-M:	Michal Kalderon <Michal.Kalderon@cavium.com>
-M:	Ariel Elior <Ariel.Elior@cavium.com>
+M:	Michal Kalderon <mkalderon@marvell.com>
+M:	Ariel Elior <aelior@marvell.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 F:	drivers/infiniband/hw/qedr/
@@ -12504,7 +12504,7 @@ F:	Documentation/scsi/LICENSE.qla2xxx
 F:	drivers/scsi/qla2xxx/
 
 QLOGIC QLA3XXX NETWORK DRIVER
-M:	Dept-GELinuxNICDev@cavium.com
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
@@ -12518,16 +12518,16 @@ F:	Documentation/scsi/LICENSE.qla4xxx
 F:	drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:	Shahed Shaikh <Shahed.Shaikh@cavium.com>
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Shahed Shaikh <shshaikh@marvell.com>
+M:	Manish Chopra <manishc@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Manish Chopra <manishc@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qlge/
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -955,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION
   endif
 endif
 
+PHONY += prepare0
 
 ifeq ($(KBUILD_EXTMOD),)
 core-y		+= kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
@@ -1061,8 +1062,7 @@ scripts: scripts_basic scripts_dtc
 # archprepare is used in arch Makefiles and when processed asm symlink,
 # version.h and scripts_basic is processed / created.
 
-# Listed in dependency order
-PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
+PHONY += prepare archprepare prepare1 prepare2 prepare3
 
 # prepare3 is used to check if we are building in a separate output directory,
 # and if so do:
@@ -1360,11 +1360,11 @@ mrproper: rm-dirs  := $(wildcard $(MRPROPER_DIRS))
 mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
 mrproper-dirs      := $(addprefix _mrproper_,scripts)
 
-PHONY += $(mrproper-dirs) mrproper archmrproper
+PHONY += $(mrproper-dirs) mrproper
 $(mrproper-dirs):
 	$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
 
-mrproper: clean archmrproper $(mrproper-dirs)
+mrproper: clean $(mrproper-dirs)
 	$(call cmd,rmdirs)
 	$(call cmd,rmfiles)
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index eb43e09c1980..926434f413fa 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -60,8 +60,6 @@
 
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN	(1ULL << KASAN_SHADOW_SCALE_SHIFT)
-#else
-#define ARCH_SLAB_MINALIGN	__alignof__(unsigned long long)
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index ac352accb3d9..3e8063f4f9d3 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -60,8 +60,11 @@ static inline bool arm64_kernel_use_ng_mappings(void)
 	 * later determine that kpti is required, then
 	 * kpti_install_ng_mappings() will make them non-global.
 	 */
+	if (arm64_kernel_unmapped_at_el0())
+		return true;
+
 	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-		return arm64_kernel_unmapped_at_el0();
+		return false;
 
 	/*
 	 * KASLR is enabled so we're going to be enabling kpti on non-broken
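[Editor's note] The mmu.h hunk above is not a pure refactor: with the new ordering, a kernel already unmapped at EL0 reports non-global mappings regardless of CONFIG_RANDOMIZE_BASE, instead of only when KASLR is compiled out. A standalone model of the two predicates (the kaslr_heuristic() stand-in for the function's KASLR tail is an assumption):

/* userspace model, not kernel code; prints the truth-table difference */
#include <stdbool.h>
#include <stdio.h>

static bool kaslr_heuristic(void) { return false; } /* placeholder tail */

static bool old_predicate(bool randomize_base, bool unmapped_at_el0)
{
    if (!randomize_base)
        return unmapped_at_el0;
    return kaslr_heuristic();
}

static bool new_predicate(bool randomize_base, bool unmapped_at_el0)
{
    if (unmapped_at_el0)
        return true;    /* kpti already forced: always use nG mappings */
    if (!randomize_base)
        return false;
    return kaslr_heuristic();
}

int main(void)
{
    for (int rb = 0; rb <= 1; rb++)
        for (int um = 0; um <= 1; um++)
            printf("RANDOMIZE_BASE=%d unmapped=%d: old=%d new=%d\n",
                   rb, um, old_predicate(rb, um), new_predicate(rb, um));
    return 0;   /* rb=1,um=1 is the case the patch changes */
}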
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index f0e6ab8abe9c..ba6b41790fcd 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/types.h>
 
+#include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
 	return ret;
 }
 
-static __init const u8 *get_cmdline(void *fdt)
+static __init const u8 *kaslr_get_cmdline(void *fdt)
 {
 	static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
 
@@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 * Check if 'nokaslr' appears on the command line, and
 	 * return 0 if that is the case.
 	 */
-	cmdline = get_cmdline(fdt);
+	cmdline = kaslr_get_cmdline(fdt);
 	str = strstr(cmdline, "nokaslr");
 	if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
 		return 0;
@@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
 	module_alloc_base &= PAGE_MASK;
 
+	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+	__flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+
 	return offset;
 }
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index 4003ddc616e1..f801f3708a89 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -37,8 +37,6 @@ libs-y	+= arch/$(ARCH)/lib/
 
 boot := arch/h8300/boot
 
-archmrproper:
-
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 320d86f192ee..171290f9f1de 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig
 NM := $(CROSS_COMPILE)nm -B
 READELF := $(CROSS_COMPILE)readelf
 
-export AWK
-
 CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
 
 OBJCOPYFLAGS := --strip-all
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 787290781b8c..0d14f51d0002 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -3155,6 +3155,7 @@ config MIPS32_O32
 config MIPS32_N32
 	bool "Kernel support for n32 binaries"
 	depends on 64BIT
+	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	select COMPAT
 	select MIPS32_COMPAT
 	select SYSVIPC_COMPAT if SYSVIPC
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49e608e..fe3773539eff 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
 	pm_power_off = bcm47xx_machine_halt;
 }
 
+#ifdef CONFIG_BCM47XX_BCMA
+static struct device * __init bcm47xx_setup_device(void)
+{
+	struct device *dev;
+	int err;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	err = dev_set_name(dev, "bcm47xx_soc");
+	if (err) {
+		pr_err("Failed to set SoC device name: %d\n", err);
+		kfree(dev);
+		return NULL;
+	}
+
+	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (err)
+		pr_err("Failed to set SoC DMA mask: %d\n", err);
+
+	return dev;
+}
+#endif
+
 /*
  * This finishes bus initialization doing things that were not possible without
  * kmalloc. Make sure to call it late enough (after mm_init).
@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
 	if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
 		int err;
 
+		bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
+		if (!bcm47xx_bus.bcma.dev)
+			panic("Failed to setup SoC device\n");
+
 		err = bcma_host_soc_init(&bcm47xx_bus.bcma);
 		if (err)
 			panic("Failed to initialize BCMA bus (err %d)", err);
@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
 	case BCM47XX_BUS_TYPE_BCMA:
+		if (device_register(bcm47xx_bus.bcma.dev))
+			pr_err("Failed to register SoC device\n");
 		bcma_bus_register(&bcm47xx_bus.bcma.bus);
 		break;
 #endif
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 2c79ab52977a..8bf43c5a7bc7 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored)
 	"	sync						\n"
 	"	synci	($0)					\n");
 
-	relocated_kexec_smp_wait(NULL);
+	kexec_reboot();
 }
 #endif
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 4e4ec779f182..6f981af67826 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
 # CONFIG_SERIAL_8250_PCI is not set
 CONFIG_SERIAL_8250_NR_UARTS=1
 CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AR933X=y
 CONFIG_SERIAL_AR933X_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
index c6b63a409641..6dd8ad2409dc 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -18,8 +18,6 @@
 #define INT_NUM_EXTRA_START	(INT_NUM_IM4_IRL0 + 32)
 #define INT_NUM_IM_OFFSET	(INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
 
-#define MIPS_CPU_TIMER_IRQ	7
-
 #define MAX_IM			5
 
 #endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index 141076325307..0b424214a5e9 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -19,8 +19,6 @@
 
 #define LTQ_DMA_CH0_INT		(INT_NUM_IM2_IRL0)
 
-#define MIPS_CPU_TIMER_IRQ	7
-
 #define MAX_IM			5
 
 #endif
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 6256d35dbf4d..bedb5047aff3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -74,14 +74,15 @@ static int __init vdma_init(void)
 		get_order(VDMA_PGTBL_SIZE));
 	BUG_ON(!pgtbl);
 	dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
-	pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
+	pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
 
 	/*
 	 * Clear the R4030 translation table
 	 */
 	vdma_pgtbl_init();
 
-	r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
+	r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
+			  CPHYSADDR((unsigned long)pgtbl));
 	r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
 	r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f0bc3312ed11..6549499eb202 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
 	.irq_set_type = ltq_eiu_settype,
 };
 
-static void ltq_hw_irqdispatch(int module)
+static void ltq_hw_irq_handler(struct irq_desc *desc)
 {
+	int module = irq_desc_get_irq(desc) - 2;
 	u32 irq;
+	int hwirq;
 
 	irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
 	if (irq == 0)
@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
 	 * other bits might be bogus
 	 */
 	irq = __fls(irq);
-	do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
+	hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
+	generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
 
 	/* if this is a EBU irq, we need to ack it or get a deadlock */
 	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
 			LTQ_EBU_PCC_ISTAT);
 }
 
-#define DEFINE_HWx_IRQDISPATCH(x)					\
-	static void ltq_hw ## x ## _irqdispatch(void)			\
-	{								\
-		ltq_hw_irqdispatch(x);					\
-	}
-DEFINE_HWx_IRQDISPATCH(0)
-DEFINE_HWx_IRQDISPATCH(1)
-DEFINE_HWx_IRQDISPATCH(2)
-DEFINE_HWx_IRQDISPATCH(3)
-DEFINE_HWx_IRQDISPATCH(4)
-
-#if MIPS_CPU_TIMER_IRQ == 7
-static void ltq_hw5_irqdispatch(void)
-{
-	do_IRQ(MIPS_CPU_TIMER_IRQ);
-}
-#else
-DEFINE_HWx_IRQDISPATCH(5)
-#endif
-
-static void ltq_hw_irq_handler(struct irq_desc *desc)
-{
-	ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
-	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-	int irq;
-
-	if (!pending) {
-		spurious_interrupt();
-		return;
-	}
-
-	pending >>= CAUSEB_IP;
-	while (pending) {
-		irq = fls(pending) - 1;
-		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
-		pending &= ~BIT(irq);
-	}
-}
-
 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
 {
 	struct irq_chip *chip = &ltq_irq_type;
@@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
 	for (i = 0; i < MAX_IM; i++)
 		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
 
-	if (cpu_has_vint) {
-		pr_info("Setting up vectored interrupts\n");
-		set_vi_handler(2, ltq_hw0_irqdispatch);
-		set_vi_handler(3, ltq_hw1_irqdispatch);
-		set_vi_handler(4, ltq_hw2_irqdispatch);
-		set_vi_handler(5, ltq_hw3_irqdispatch);
-		set_vi_handler(6, ltq_hw4_irqdispatch);
-		set_vi_handler(7, ltq_hw5_irqdispatch);
-	}
-
 	ltq_domain = irq_domain_add_linear(node,
 		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
 		&irq_domain_ops, 0);
 
-#ifndef CONFIG_MIPS_MT_SMP
-	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
-		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#else
-	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
-		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#endif
-
 	/* tell oprofile which irq to use */
 	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
 
-	/*
-	 * if the timer irq is not one of the mips irqs we need to
-	 * create a mapping
-	 */
-	if (MIPS_CPU_TIMER_IRQ != 7)
-		irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
-
 	/* the external interrupts are optional and xway only */
 	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
 	if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
@@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
-	return MIPS_CPU_TIMER_IRQ;
+	return CP0_LEGACY_COMPARE_IRQ;
 }
 
 static struct of_device_id __initdata of_irq_ids[] = {
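[Editor's note] The plat_irq_dispatch() that the lantiq hunk deletes used a classic pattern worth keeping in mind: scan a pending-interrupt mask from the highest set bit down, dispatching and clearing one source per iteration. A runnable userspace sketch of that pattern (handler and mask values invented; __builtin_clz assumes GCC/Clang):

/* userspace sketch of the fls()-based pending-bit dispatch loop */
#include <stdio.h>

static int fls_u32(unsigned int x)
{
    return x ? 32 - __builtin_clz(x) : 0;
}

static void handle_irq(int irq)
{
    printf("dispatch irq %d\n", irq);
}

int main(void)
{
    unsigned int pending = 0x29;   /* bits 0, 3 and 5 pending */

    while (pending) {
        int irq = fls_u32(pending) - 1;   /* highest number first */

        handle_irq(irq);
        pending &= ~(1u << irq);          /* ack this source */
    }
    return 0;
}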
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb849b10e..288b58b00dc8 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
 	int irq;
 	struct irq_chip *msi;
 
-	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+		return 0;
+	} else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
 		msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
 		msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
 		msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 0a935c136ec2..ac3482882cf9 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -3,9 +3,6 @@ OBJCOPYFLAGS	:= -O binary -R .note -R .note.gnu.build-id -R .comment -S
 
 KBUILD_DEFCONFIG := defconfig
 
-comma = ,
-
-
 ifdef CONFIG_FUNCTION_TRACER
 arch-y += -malways-save-lp -mno-relax
 endif
@@ -54,8 +51,6 @@ endif
 boot := arch/nds32/boot
 core-y += $(boot)/dts/
 
-.PHONY: FORCE
-
 Image: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
@@ -68,9 +63,6 @@ prepare: vdso_prepare
 vdso_prepare: prepare0
 	$(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h
 
-CLEAN_FILES += include/asm-nds32/constants.h*
-
-# We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 70e06d34006c..bf10141c7426 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -20,7 +20,6 @@ KBUILD_DEFCONFIG := or1ksim_defconfig
 
 OBJCOPYFLAGS    := -O binary -R .note -R .comment -S
-LDFLAGS_vmlinux :=
 LIBGCC 		:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
 KBUILD_CFLAGS	+= -pipe -ffixed-r10 -D__linux__
@@ -50,5 +49,3 @@ else
 BUILTIN_DTB := n
 endif
 core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/
-
-all: vmlinux
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index ff91192407d1..f599064dd8dc 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
 	PERF_REG_POWERPC_DAR,
 	PERF_REG_POWERPC_DSISR,
 	PERF_REG_POWERPC_SIER,
+	PERF_REG_POWERPC_MMCRA,
 	PERF_REG_POWERPC_MAX,
 };
 #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 57deb1e9ffea..20cc816b3508 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -852,11 +852,12 @@ start_here:
 
 	/* set up the PTE pointers for the Abatron bdiGDB.
 	*/
-	tovirt(r6,r6)
 	lis	r5, abatron_pteptrs@h
 	ori	r5, r5, abatron_pteptrs@l
 	stw	r5, 0xf0(0)	/* Must match your Abatron config file */
 	tophys(r5,r5)
+	lis	r6, swapper_pg_dir@h
+	ori	r6, r6, swapper_pg_dir@l
 	stw	r6, 0(r5)
 
 /* Now turn on the MMU for real! */
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index bd5e6834ca69..6794466f6420 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn)
 		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
 					   &uc_transact->uc_mcontext))
 			goto badframe;
-	}
+	} else
 #endif
-	/* Fall through, for non-TM restore */
-	if (!MSR_TM_ACTIVE(msr)) {
+	{
 		/*
+		 * Fall through, for non-TM restore
+		 *
 		 * Unset MSR[TS] on the thread regs since MSR from user
 		 * context does not have MSR active, and recheckpoint was
 		 * not called since restore_tm_sigcontexts() was not called
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 29746dc28df5..517662a56bdc 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -967,13 +967,6 @@ out:
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
-unsigned long __init arch_syscall_addr(int nr)
-{
-	return sys_call_table[nr*2];
-}
-#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
-
 #ifdef PPC64_ELF_ABI_v1
 char *arch_ftrace_match_adjust(char *str, const char *search)
 {
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index 5c36b3a8d47a..3349f3f8fe84 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
 	PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
 	PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
 	PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
+	PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
 };
 
 u64 perf_reg_value(struct pt_regs *regs, int idx)
@@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 		     !is_sier_available()))
 		return 0;
 
+	if (idx == PERF_REG_POWERPC_MMCRA &&
+	    (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
+	     IS_ENABLED(CONFIG_PPC32)))
+		return 0;
+
 	return regs_get_register(regs, pt_regs_offset[idx]);
 }
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
index a1aaa1569d7c..f0e488d97567 100644
--- a/arch/powerpc/platforms/4xx/ocm.c
+++ b/arch/powerpc/platforms/4xx/ocm.c
@@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
 			continue;
 
 		seq_printf(m, "PPC4XX OCM   : %d\n", ocm->index);
-		seq_printf(m, "PhysAddr     : %pa[p]\n", &(ocm->phys));
+		seq_printf(m, "PhysAddr     : %pa\n", &(ocm->phys));
 		seq_printf(m, "MemTotal     : %d Bytes\n", ocm->memtotal);
 		seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
 		seq_printf(m, "MemTotal(C)  : %d Bytes\n\n", ocm->c.memtotal);
 
-		seq_printf(m, "NC.PhysAddr  : %pa[p]\n", &(ocm->nc.phys));
+		seq_printf(m, "NC.PhysAddr  : %pa\n", &(ocm->nc.phys));
 		seq_printf(m, "NC.VirtAddr  : 0x%p\n", ocm->nc.virt);
 		seq_printf(m, "NC.MemTotal  : %d Bytes\n", ocm->nc.memtotal);
 		seq_printf(m, "NC.MemFree   : %d Bytes\n", ocm->nc.memfree);
@@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
 				   blk->size, blk->owner);
 		}
 
-		seq_printf(m, "\nC.PhysAddr   : %pa[p]\n", &(ocm->c.phys));
+		seq_printf(m, "\nC.PhysAddr   : %pa\n", &(ocm->c.phys));
 		seq_printf(m, "C.VirtAddr   : 0x%p\n", ocm->c.virt);
 		seq_printf(m, "C.MemTotal   : %d Bytes\n", ocm->c.memtotal);
 		seq_printf(m, "C.MemFree    : %d Bytes\n", ocm->c.memfree);
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index e66644e0fb40..9438fa0fc355 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void)
 	/* see if there is a keyboard in the device tree
 	   with a parent of type "adb" */
 	for_each_node_by_name(kbd, "keyboard")
-		if (kbd->parent && kbd->parent->type
-		    && strcmp(kbd->parent->type, "adb") == 0)
+		if (of_node_is_type(kbd->parent, "adb"))
 			break;
 	of_node_put(kbd);
 	if (kbd)
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index d7f742ed48ba..3f58c7dbd581 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
 		}
 	} else {
 		/* Create a group for 1 GPU and attached NPUs for POWER8 */
-		pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL);
+		pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
 		table_group = &pe->npucomp->table_group;
 		table_group->ops = &pnv_npu_peers_ops;
 		iommu_register_group(table_group, hose->global_number,
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 1d6406a051f1..7db3119f8a5b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2681,7 +2681,8 @@ static void pnv_pci_ioda_setup_iommu_api(void)
 	list_for_each_entry(hose, &hose_list, list_node) {
 		phb = hose->private_data;
 
-		if (phb->type == PNV_PHB_NPU_NVLINK)
+		if (phb->type == PNV_PHB_NPU_NVLINK ||
+		    phb->type == PNV_PHB_NPU_OCAPI)
 			continue;
 
 		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 7725825d887d..37a77e57893e 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void)
 			if (!of_device_is_compatible(nvdn->parent,
 						"ibm,power9-npu"))
 				continue;
+#ifdef CONFIG_PPC_POWERNV
 			WARN_ON_ONCE(pnv_npu2_init(hose));
+#endif
 			break;
 		}
 	}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 15af091611e2..4b4a7f32b68e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -617,7 +617,7 @@ config X86_INTEL_QUARK
 
 config X86_INTEL_LPSS
 	bool "Intel Low Power Subsystem Support"
-	depends on X86 && ACPI
+	depends on X86 && ACPI && PCI
 	select COMMON_CLK
 	select PINCTRL
 	select IOSF_MBI
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a77445d1b034..780f2b42c8ef 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -711,7 +711,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
 {
 	if (unlikely(!access_ok(ptr,len)))
 		return 0;
-	__uaccess_begin();
+	__uaccess_begin_nospec();
 	return 1;
 }
 #define user_access_begin(a,b)	user_access_begin(a,b)
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 2f6787fc7106..c54a493e139a 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
 	val = native_read_msr_safe(msr, err);
 	switch (msr) {
 	case MSR_IA32_APICBASE:
-#ifdef CONFIG_X86_X2APIC
-		if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
-#endif
-			val &= ~X2APIC_ENABLE;
+		val &= ~X2APIC_ENABLE;
 		break;
 	}
 	return val;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 72bf446c3fee..6e29794573b7 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -361,8 +361,6 @@ void xen_timer_resume(void)
 {
 	int cpu;
 
-	pvclock_resume();
-
 	if (xen_clockevent != &xen_vcpuop_clockevent)
 		return;
 
@@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
 };
 
 static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
+static u64 xen_clock_value_saved;
 
 void xen_save_time_memory_area(void)
 {
 	struct vcpu_register_time_memory_area t;
 	int ret;
 
+	xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
+
 	if (!xen_clock)
 		return;
 
@@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
 	int ret;
 
 	if (!xen_clock)
-		return;
+		goto out;
 
 	t.addr.v = &xen_clock->pvti;
 
@@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
 	if (ret != 0)
 		pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
 			  ret);
+
+out:
+	/* Need pvclock_resume() before using xen_clocksource_read(). */
+	pvclock_resume();
+	xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
 }
 
 static void xen_setup_vsyscall_time_info(void)
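[Editor's note] The xen/time.c hunk keeps the scheduler clock monotonic across save/restore: it records the last observed clock value at save time and re-anchors an offset at restore time, after the backing pvclock is usable again. A runnable userspace sketch of that offset trick (the variables stand in for xen_clocksource_read()/xen_sched_clock_offset; the "reset" is simulated):

/* userspace sketch of clock-continuity via a saved offset */
#include <stdint.h>
#include <stdio.h>

static uint64_t raw_clock;          /* stands in for xen_clocksource_read() */
static uint64_t sched_clock_offset; /* stands in for xen_sched_clock_offset */
static uint64_t clock_value_saved;

static uint64_t sched_clock(void)
{
    return raw_clock - sched_clock_offset;
}

int main(void)
{
    raw_clock = 1000;
    printf("before save:   %llu\n", (unsigned long long)sched_clock());

    /* save: remember the value readers last observed */
    clock_value_saved = raw_clock - sched_clock_offset;

    /* restore: the underlying clock restarted from a new base */
    raw_clock = 40;

    /* re-anchor the offset so readers continue from the saved value */
    sched_clock_offset = raw_clock - clock_value_saved;
    printf("after restore: %llu\n", (unsigned long long)sched_clock());
    return 0;
}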
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 63e0f12be7c9..72adbbe975d5 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
 }
 
 /**
- * __bfq_deactivate_entity - deactivate an entity from its service tree.
- * @entity: the entity to deactivate.
+ * __bfq_deactivate_entity - update sched_data and service trees for
+ * entity, so as to represent entity as inactive
+ * @entity: the entity being deactivated.
  * @ins_into_idle_tree: if false, the entity will not be put into the
  *			idle tree.
  *
- * Deactivates an entity, independently of its previous state.  Must
- * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it into the idle
- * tree.
+ * If necessary and allowed, puts entity into the idle tree. NOTE:
+ * entity may be on no tree if in service.
  */
 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 {
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c
index fb2c82c351e4..038cb627c868 100644
--- a/block/blk-mq-debugfs-zoned.c
+++ b/block/blk-mq-debugfs-zoned.c
@@ -1,8 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 Western Digital Corporation or its affiliates.
- *
- * This file is released under the GPL.
  */
 
 #include <linux/blkdev.h>
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ba37b9e15e9..8f5b533764ca 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1906,7 +1906,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
-	struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
+	struct blk_mq_alloc_data data = { .flags = 0};
 	struct request *rq;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
@@ -1928,6 +1928,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	rq_qos_throttle(q, bio);
 
+	data.cmd_flags = bio->bi_opf;
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 6651e713c45d..5564e73266a6 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	ictx = skcipher_instance_ctx(inst);
 
 	/* Stream cipher, e.g. "xchacha12" */
+	crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
+				  skcipher_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
 				   0, crypto_requires_sync(algt->type,
 							   algt->mask));
@@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
 
 	/* Block cipher, e.g. "aes" */
+	crypto_set_spawn(&ictx->blockcipher_spawn,
+			 skcipher_crypto_instance(inst));
 	err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
 				CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
 	if (err)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 37f54d1b2f66..4be293a4b5f0 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
 		return -EINVAL;
 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
 		return -EINVAL;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+	/*
+	 * RTA_OK() didn't align the rtattr's payload when validating that it
+	 * fits in the buffer.  Yet, the keys should start on the next 4-byte
+	 * aligned boundary.  To avoid confusion, require that the rtattr
+	 * payload be exactly the param struct, which has a 4-byte aligned size.
+	 */
+	if (RTA_PAYLOAD(rta) != sizeof(*param))
 		return -EINVAL;
+	BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
 
 	param = RTA_DATA(rta);
 	keys->enckeylen = be32_to_cpu(param->enckeylen);
 
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
+	key += rta->rta_len;
+	keylen -= rta->rta_len;
 
 	if (keylen < keys->enckeylen)
 		return -EINVAL;
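[Editor's note] The crypto/authenc.c hunk tightens parsing of the authenc key blob: an rtattr header carrying a crypto_authenc_key_param, followed by the auth and enc keys. A runnable userspace sketch of the stricter check; the struct definition and the CRYPTO_AUTHENC_KEYA_PARAM value are copied here by hand (they are kernel-internal, so treat them as assumptions):

/* build and parse an authenc-style key blob with the exact-size check */
#include <arpa/inet.h>          /* htonl()/ntohl() for the __be32 field */
#include <linux/rtnetlink.h>    /* struct rtattr, RTA_OK, RTA_DATA, ... */
#include <stdio.h>
#include <string.h>

#define CRYPTO_AUTHENC_KEYA_PARAM 1    /* mirrors crypto/authenc.h */

struct crypto_authenc_key_param {
    unsigned int enckeylen;            /* big-endian on the wire */
};

static int extract_enckeylen(const unsigned char *key, unsigned int keylen,
                             unsigned int *enckeylen)
{
    const struct rtattr *rta = (const struct rtattr *)key;
    const struct crypto_authenc_key_param *param;

    if (!RTA_OK(rta, keylen))
        return -1;
    if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
        return -1;
    /* exact match, as in the patch: the payload is only the param */
    if (RTA_PAYLOAD(rta) != sizeof(*param))
        return -1;

    param = RTA_DATA(rta);
    *enckeylen = ntohl(param->enckeylen);
    return 0;
}

int main(void)
{
    /* static storage gives the 4-byte alignment rtattr expects */
    static unsigned char blob[64];
    struct rtattr *rta = (struct rtattr *)blob;
    struct crypto_authenc_key_param *param;
    unsigned int enckeylen;

    memset(blob, 0, sizeof(blob));
    rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
    rta->rta_len = RTA_LENGTH(sizeof(*param));
    param = RTA_DATA(rta);
    param->enckeylen = htonl(16);

    if (extract_enckeylen(blob, sizeof(blob), &enckeylen) == 0)
        printf("enckeylen = %u\n", enckeylen);
    return 0;
}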
"xchacha12" */ + crypto_set_skcipher_spawn(&ictx->streamcipher_spawn, + skcipher_crypto_instance(inst)); err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, 0, crypto_requires_sync(algt->type, algt->mask)); @@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); /* Block cipher, e.g. "aes" */ + crypto_set_spawn(&ictx->blockcipher_spawn, + skcipher_crypto_instance(inst)); err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (err) diff --git a/crypto/authenc.c b/crypto/authenc.c index 37f54d1b2f66..4be293a4b5f0 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, return -EINVAL; if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) return -EINVAL; - if (RTA_PAYLOAD(rta) < sizeof(*param)) + + /* + * RTA_OK() didn't align the rtattr's payload when validating that it + * fits in the buffer. Yet, the keys should start on the next 4-byte + * aligned boundary. To avoid confusion, require that the rtattr + * payload be exactly the param struct, which has a 4-byte aligned size. + */ + if (RTA_PAYLOAD(rta) != sizeof(*param)) return -EINVAL; + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); param = RTA_DATA(rta); keys->enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); + key += rta->rta_len; + keylen -= rta->rta_len; if (keylen < keys->enckeylen) return -EINVAL; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 80a25cc04aec..4741fe89ba2c 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, struct aead_request *req = areq->data; err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); - aead_request_complete(req, err); + authenc_esn_request_complete(req, err); } static int crypto_authenc_esn_decrypt(struct aead_request *req) diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index 9a5c60f08aad..c0cf87ae7ef6 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c @@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m) for (i = 0; i <= 63; i++) { - ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); + ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); ss2 = ss1 ^ rol32(a, 12); diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 7c6afc111d76..bb857421c2e8 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -41,7 +41,8 @@ acpi-y += ec.o acpi-$(CONFIG_ACPI_DOCK) += dock.o acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o -acpi-y += acpi_lpss.o acpi_apd.o +acpi-$(CONFIG_PCI) += acpi_lpss.o +acpi-y += acpi_apd.o acpi-y += acpi_platform.o acpi-y += acpi_pnp.o acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 99d820a693a8..5c093ce01bcd 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1054,18 +1054,6 @@ void __init acpi_early_init(void) goto error0; } - /* - * ACPI 2.0 requires the EC driver to be loaded and work before - * the EC device is found in the namespace (i.e. before - * acpi_load_tables() is called). - * - * This is accomplished by looking for the ECDT table, and getting - * the EC parameters out of that. - * - * Ignore the result. Not having an ECDT is not fatal. 
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 7c6afc111d76..bb857421c2e8 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -41,7 +41,8 @@ acpi-y				+= ec.o
 acpi-$(CONFIG_ACPI_DOCK)	+= dock.o
 acpi-$(CONFIG_PCI)		+= pci_root.o pci_link.o pci_irq.o
 obj-$(CONFIG_ACPI_MCFG)		+= pci_mcfg.o
-acpi-y				+= acpi_lpss.o acpi_apd.o
+acpi-$(CONFIG_PCI)		+= acpi_lpss.o
+acpi-y				+= acpi_apd.o
 acpi-y				+= acpi_platform.o
 acpi-y				+= acpi_pnp.o
 acpi-$(CONFIG_ARM_AMBA)	+= acpi_amba.o
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 99d820a693a8..5c093ce01bcd 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1054,18 +1054,6 @@ void __init acpi_early_init(void)
 		goto error0;
 	}
 
-	/*
-	 * ACPI 2.0 requires the EC driver to be loaded and work before
-	 * the EC device is found in the namespace (i.e. before
-	 * acpi_load_tables() is called).
-	 *
-	 * This is accomplished by looking for the ECDT table, and getting
-	 * the EC parameters out of that.
-	 *
-	 * Ignore the result. Not having an ECDT is not fatal.
-	 */
-	status = acpi_ec_ecdt_probe();
-
 #ifdef CONFIG_X86
 	if (!acpi_ioapic) {
 		/* compatible (0) means level (3) */
@@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void)
 		goto error1;
 	}
 
+	/*
+	 * ACPI 2.0 requires the EC driver to be loaded and work before the EC
+	 * device is found in the namespace.
+	 *
+	 * This is accomplished by looking for the ECDT table and getting the EC
+	 * parameters out of that.
+	 *
+	 * Do that before calling acpi_initialize_objects() which may trigger EC
+	 * address space accesses.
+	 */
+	acpi_ec_ecdt_probe();
+
 	status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
 	if (ACPI_FAILURE(status)) {
 		printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7e6952edb5b0..6a9e1fb8913a 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -81,7 +81,11 @@ void acpi_debugfs_init(void);
 #else
 static inline void acpi_debugfs_init(void) { return; }
 #endif
+#ifdef CONFIG_PCI
 void acpi_lpss_init(void);
+#else
+static inline void acpi_lpss_init(void) {}
+#endif
 
 void acpi_apd_init(void);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 011d3db19c80..5143e11e3b0f 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -26,7 +26,6 @@
 #include <acpi/nfit.h>
 #include "intel.h"
 #include "nfit.h"
-#include "intel.h"
 
 /*
  * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
@@ -78,12 +77,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id)
 }
 EXPORT_SYMBOL(to_nfit_uuid);
 
-static struct acpi_nfit_desc *to_acpi_nfit_desc(
-		struct nvdimm_bus_descriptor *nd_desc)
-{
-	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
-}
-
 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
 {
 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
@@ -419,7 +412,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
 {
-	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
 	union acpi_object in_obj, in_buf, *out_obj;
 	const struct nd_cmd_desc *desc = NULL;
@@ -721,6 +714,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
 	struct acpi_nfit_memory_map *memdev;
 	struct acpi_nfit_desc *acpi_desc;
 	struct nfit_mem *nfit_mem;
+	u16 physical_id;
 
 	mutex_lock(&acpi_desc_lock);
 	list_for_each_entry(acpi_desc, &acpi_descs, list) {
@@ -728,10 +722,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
 		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
 			memdev = __to_nfit_memdev(nfit_mem);
 			if (memdev->device_handle == device_handle) {
+				*flags = memdev->flags;
+				physical_id = memdev->physical_id;
 				mutex_unlock(&acpi_desc->init_mutex);
 				mutex_unlock(&acpi_desc_lock);
-				*flags = memdev->flags;
-				return memdev->physical_id;
+				return physical_id;
 			}
 		}
 		mutex_unlock(&acpi_desc->init_mutex);
@@ -2231,7 +2226,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
 	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
 	if (!nd_set)
 		return -ENOMEM;
-	ndr_desc->nd_set = nd_set;
 	guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
 
 	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
@@ -3367,7 +3361,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init);
 
 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 {
-	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 	struct device *dev = acpi_desc->dev;
 
 	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
@@ -3384,7 +3378,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 		struct nvdimm *nvdimm, unsigned int cmd)
 {
-	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
 	if (nvdimm)
 		return 0;
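[Editor's note] The nfit_get_smbios_id() hunk above reorders the code so that memdev->flags and memdev->physical_id are copied into locals before the mutexes are dropped; the old code dereferenced memdev after unlock, when the list entry was no longer protected. A runnable userspace sketch of the copy-before-unlock pattern (record type and lookup are invented; build with -lpthread):

/* read protected fields into locals while locked, then unlock */
#include <pthread.h>
#include <stdio.h>

struct record {
    unsigned int handle;
    unsigned short flags;
    unsigned short physical_id;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct record table[] = { { 42, 0x3, 7 } };

static int lookup(unsigned int handle, unsigned short *flags)
{
    unsigned short physical_id;
    size_t i;

    pthread_mutex_lock(&table_lock);
    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if (table[i].handle == handle) {
            *flags = table[i].flags;          /* copy while locked */
            physical_id = table[i].physical_id;
            pthread_mutex_unlock(&table_lock);
            return physical_id;   /* no table access past the unlock */
        }
    }
    pthread_mutex_unlock(&table_lock);
    return -1;
}

int main(void)
{
    unsigned short flags;
    int id = lookup(42, &flags);

    printf("physical_id=%d flags=0x%x\n", id, flags);
    return 0;
}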
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 850b2927b4e7..f70de71f79d6 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
 
 static void nvdimm_invalidate_cache(void);
 
-static int intel_security_unlock(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
 		const struct nvdimm_key_data *key_data)
 {
 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm,
 	return 0;
 }
 
-static int intel_security_erase(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
 		const struct nvdimm_key_data *key,
 		enum nvdimm_passphrase_type ptype)
 {
@@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm,
 	return 0;
 }
 
-static int intel_security_query_overwrite(struct nvdimm *nvdimm)
+static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
 {
 	int rc;
 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm)
 	return 0;
 }
 
-static int intel_security_overwrite(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
 		const struct nvdimm_key_data *nkey)
 {
 	int rc;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4ca7a6b4eaae..8218db17ebdb 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers"
 
 config PATA_ACPI
 	tristate "ACPI firmware driver for PATA"
-	depends on ATA_ACPI && ATA_BMDMA
+	depends on ATA_ACPI && ATA_BMDMA && PCI
 	help
 	  This option enables an ACPI method driver which drives
 	  motherboard PATA controller interfaces through the ACPI
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1bd1145ad8b5..330c1f7e9665 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -108,6 +108,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 	 * suppress pointless writes.
 	 */
 	for (i = 0; i < d->chip->num_regs; i++) {
+		if (!d->chip->mask_base)
+			continue;
+
 		reg = d->chip->mask_base +
 			(i * map->reg_stride * d->irq_reg_stride);
 		if (d->chip->mask_invert) {
@@ -258,7 +261,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
 	const struct regmap_irq_type *t = &irq_data->type;
 
 	if ((t->types_supported & type) != type)
-		return -ENOTSUPP;
+		return 0;
 
 	reg = t->type_reg_offset / map->reg_stride;
 
@@ -588,6 +591,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 	/* Mask all the interrupts by default */
 	for (i = 0; i < chip->num_regs; i++) {
 		d->mask_buf[i] = d->mask_buf_def[i];
+		if (!chip->mask_base)
+			continue;
+
 		reg = chip->mask_base +
 			(i * map->reg_stride * d->irq_reg_stride);
 		if (chip->mask_invert)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 08696f5f00bb..7c9a949e876b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
 	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
 	set_capacity(nbd->disk, config->bytesize >> 9);
 	if (bdev) {
-		if (bdev->bd_disk)
+		if (bdev->bd_disk) {
 			bd_set_size(bdev, config->bytesize);
-		else
+			set_blocksize(bdev, config->blksize);
+		} else
 			bdev->bd_invalidated = 1;
 		bdput(bdev);
 	}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5a90075f719d..0be55fcc19ba 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
 	depends on ARCH_BCM_IPROC
 	depends on MAILBOX
 	default m
+	select CRYPTO_AUTHENC
 	select CRYPTO_DES
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c9393ffb70ed..5567cbda2798 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 	struct spu_hw *spu = &iproc_priv.spu;
 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
-	const u8 *origkey = key;
-	const unsigned int origkeylen = keylen;
-
-	int ret = 0;
+	struct crypto_authenc_keys keys;
+	int ret;
 
 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
 		 keylen);
 	flow_dump("  key: ", key, keylen);
 
-	if (!RTA_OK(rta, keylen))
-		goto badkey;
-	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-		goto badkey;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+	ret = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (ret)
 		goto badkey;
 
-	param = RTA_DATA(rta);
-	ctx->enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
-
-	if (keylen < ctx->enckeylen)
-		goto badkey;
-	if (ctx->enckeylen > MAX_KEY_SIZE)
+	if (keys.enckeylen > MAX_KEY_SIZE ||
+	    keys.authkeylen > MAX_KEY_SIZE)
 		goto badkey;
 
-	ctx->authkeylen = keylen - ctx->enckeylen;
-
-	if (ctx->authkeylen > MAX_KEY_SIZE)
-		goto badkey;
+	ctx->enckeylen = keys.enckeylen;
+	ctx->authkeylen = keys.authkeylen;
 
-	memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
 	/* May end up padding auth key. So make sure it's zeroed. */
 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
-	memcpy(ctx->authkey, key, ctx->authkeylen);
+	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
 
 	switch (ctx->alg->cipher_info.alg) {
 	case CIPHER_ALG_DES:
@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 			u32 tmp[DES_EXPKEY_WORDS];
 			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
 
-			if (des_ekey(tmp, key) == 0) {
+			if (des_ekey(tmp, keys.enckey) == 0) {
 				if (crypto_aead_get_flags(cipher) &
 				    CRYPTO_TFM_REQ_WEAK_KEY) {
 					crypto_aead_set_flags(cipher, flags);
@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 		break;
 	case CIPHER_ALG_3DES:
 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
-			const u32 *K = (const u32 *)key;
+			const u32 *K = (const u32 *)keys.enckey;
 			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
 
 			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
 		ctx->fallback_cipher->base.crt_flags |=
 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
-		ret =
-		    crypto_aead_setkey(ctx->fallback_cipher, origkey,
-				       origkeylen);
+		ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
 		if (ret) {
 			flow_log("  fallback setkey() returned:%d\n", ret);
 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 92e593e2069a..80ae69f906fb 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void)
 		 * Skip algorithms requiring message digests
 		 * if MD or MD size is not supported by device.
 		 */
-		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+		if (is_mdha(c2_alg_sel) &&
 		    (!md_inst || t_alg->aead.maxauthsize > md_limit))
 			continue;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 81712aa5d0f2..bb1a2cdf1951 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 
 	desc = edesc->hw_desc;
 
-	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, state->buf_dma)) {
-		dev_err(jrdev, "unable to map src\n");
-		goto unmap;
-	}
+	if (buflen) {
+		state->buf_dma = dma_map_single(jrdev, buf, buflen,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, state->buf_dma)) {
+			dev_err(jrdev, "unable to map src\n");
+			goto unmap;
+		}
 
-	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+	}
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index ec10230178c5..4b6854bf896a 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,6 +1155,7 @@
 #define OP_ALG_ALGSEL_DES	(0x20 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_3DES	(0x21 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_ARC4	(0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_CHA_MDHA		(0x40 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_MD5	(0x40 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_SHA1	(0x41 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_SHA224	(0x42 << OP_ALG_ALGSEL_SHIFT)
caam_strstatus(struct device *dev, u32 status, bool qi_v2); @@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2); void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, struct scatterlist *sg, size_t tlen, bool ascii); + +static inline bool is_mdha(u32 algtype) +{ + return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) == + OP_ALG_CHA_MDHA; +} #endif /* CAAM_ERROR_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index e34e4df8fd24..fe070d75c842 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq) /* ORH error code */ err = READ_ONCE(*sr->resp.orh) & 0xff; - softreq_destroy(sr); if (sr->callback) sr->callback(sr->cb_arg, err); + softreq_destroy(sr); req_completed++; } diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index f2643cda45db..a3527c00b29a 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct rtattr *rta = (struct rtattr *)key; struct cc_crypto_req cc_req = {}; - struct crypto_authenc_key_param *param; struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; - int rc = -EINVAL; unsigned int seq_len = 0; struct device *dev = drvdata_to_dev(ctx->drvdata); + const u8 *enckey, *authkey; + int rc; dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); @@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, /* STAT_PHASE_0: Init and sanity checks */ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. 
*/ - if (!RTA_OK(rta, keylen)) - goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - param = RTA_DATA(rta); - ctx->enc_keylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < ctx->enc_keylen) + struct crypto_authenc_keys keys; + + rc = crypto_authenc_extractkeys(&keys, key, keylen); + if (rc) goto badkey; - ctx->auth_keylen = keylen - ctx->enc_keylen; + enckey = keys.enckey; + authkey = keys.authkey; + ctx->enc_keylen = keys.enckeylen; + ctx->auth_keylen = keys.authkeylen; if (ctx->cipher_mode == DRV_CIPHER_CTR) { /* the nonce is stored in bytes at end of key */ + rc = -EINVAL; if (ctx->enc_keylen < (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) goto badkey; /* Copy nonce from last 4 bytes in CTR key to * first 4 bytes in CTR IV */ - memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + - ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, - CTR_RFC3686_NONCE_SIZE); + memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); /* Set CTR key size */ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; } } else { /* non-authenc - has just one key */ + enckey = key; + authkey = NULL; ctx->enc_keylen = keylen; ctx->auth_keylen = 0; } @@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, /* STAT_PHASE_1: Copy key to ctx */ /* Get key material */ - memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); + memcpy(ctx->enckey, enckey, ctx->enc_keylen); if (ctx->enc_keylen == 24) memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { - memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); + memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, + ctx->auth_keylen); } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ - rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); + rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); if (rc) goto badkey; } diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 45e20707cef8..f8e2c5c3f4eb 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; - void *err; if (cryptlen + authsize > max_len) { dev_err(dev, "length exceeds h/w max limit\n"); return ERR_PTR(-EINVAL); } - if (ivsize) - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); - if (!dst || dst == src) { src_len = assoclen + cryptlen + authsize; src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } src_nents = (src_nents == 1) ? 0 : src_nents; dst_nents = dst ? src_nents : 0; @@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } src_nents = (src_nents == 1) ? 0 : src_nents; dst_len = assoclen + cryptlen + (encrypt ? 
authsize : 0); dst_nents = sg_nents_for_len(dst, dst_len); if (dst_nents < 0) { dev_err(dev, "Invalid number of dst SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } dst_nents = (dst_nents == 1) ? 0 : dst_nents; } @@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, /* if its a ahash, add space for a second desc next to the first one */ if (is_sec1 && !dst) alloc_len += sizeof(struct talitos_desc); + alloc_len += ivsize; edesc = kmalloc(alloc_len, GFP_DMA | flags); - if (!edesc) { - err = ERR_PTR(-ENOMEM); - goto error_sg; + if (!edesc) + return ERR_PTR(-ENOMEM); + if (ivsize) { + iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); } memset(&edesc->desc, 0, sizeof(edesc->desc)); @@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, DMA_BIDIRECTIONAL); } return edesc; -error_sg: - if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); - return err; } static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index dafc645b2e4e..b083b219b1a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -531,17 +531,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_gem_object *obj; struct amdgpu_framebuffer *amdgpu_fb; int ret; - int height; - struct amdgpu_device *adev = dev->dev_private; - int cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0); - int pitch = mode_cmd->pitches[0] / cpp; - - pitch = amdgpu_align_pitch(adev, pitch, cpp, false); - if (mode_cmd->pitches[0] != pitch) { - DRM_DEBUG_KMS("Invalid pitch: expecting %d but got %d\n", - pitch, mode_cmd->pitches[0]); - return ERR_PTR(-EINVAL); - } obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); if (obj == NULL) { @@ -556,13 +545,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-EINVAL); } - height = ALIGN(mode_cmd->height, 8); - if (obj->size < pitch * height) { - DRM_DEBUG_KMS("Invalid GEM size: expecting >= %d but got %zu\n", - pitch * height, obj->size); - return ERR_PTR(-EINVAL); - } - amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); if (amdgpu_fb == NULL) { drm_gem_object_put_unlocked(obj); diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index fbf0ee5201c3..c3613604a4f8 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -4,8 +4,8 @@ config HSA_AMD bool "HSA kernel driver for AMD GPU devices" - depends on DRM_AMDGPU && X86_64 - imply AMD_IOMMU_V2 + depends on DRM_AMDGPU && (X86_64 || ARM64) + imply AMD_IOMMU_V2 if X86_64 select MMU_NOTIFIER help Enable this if you want to use HSA features on AMD GPU devices. 
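Aside on the crypto changes above: the bcm and ccree setkey rewrites converge on the same shape, replacing each driver's open-coded rtattr walk over the authenc() key blob with crypto_authenc_extractkeys(), then bounds-checking both halves before copying them into fixed-size context buffers. Centralizing the parsing means the easy-to-get-wrong RTA_OK/RTA_PAYLOAD/RTA_ALIGN arithmetic lives in one audited helper. A minimal sketch of that pattern, not any one driver's actual code (struct example_ctx, example_aead_setkey and EXAMPLE_MAX_KEY_SIZE are hypothetical stand-ins for the per-driver context and limits):

#include <linux/string.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>	/* crypto_aead_ctx() */
#include <crypto/authenc.h>		/* crypto_authenc_extractkeys() */

#define EXAMPLE_MAX_KEY_SIZE	64	/* hypothetical per-driver limit */

struct example_ctx {
	u8 enckey[EXAMPLE_MAX_KEY_SIZE];
	u8 authkey[EXAMPLE_MAX_KEY_SIZE];
	unsigned int enckeylen;
	unsigned int authkeylen;
};

static int example_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct example_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	/* Validates the rtattr header and splits key into the two halves. */
	if (crypto_authenc_extractkeys(&keys, key, keylen))
		return -EINVAL;

	/* Bound both halves before touching the fixed-size buffers. */
	if (keys.enckeylen > EXAMPLE_MAX_KEY_SIZE ||
	    keys.authkeylen > EXAMPLE_MAX_KEY_SIZE)
		return -EINVAL;

	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	/* The auth key may be zero-padded later, so clear the whole buffer. */
	memset(ctx->authkey, 0, sizeof(ctx->authkey));
	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	return 0;
}

Note how the bcm hunks above still keep the original key/keylen pair around for the fallback cipher's setkey: crypto_authenc_extractkeys() only yields pointers into that blob, it does not copy it.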
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index b7bc7d7d048f..5d85ff341385 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, return 0; } +#ifdef CONFIG_X86_64 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, uint32_t *num_entries, struct crat_subtype_iolink *sub_type_hdr) @@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, return 0; } +#endif /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU * @@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) struct crat_subtype_generic *sub_type_hdr; int avail_size = *size; int numa_node_id; +#ifdef CONFIG_X86_64 uint32_t entries = 0; +#endif int ret = 0; if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) @@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) sub_type_hdr->length); /* Fill in Subtype: IO Link */ +#ifdef CONFIG_X86_64 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, &entries, (struct crat_subtype_iolink *)sub_type_hdr); @@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length * entries); +#else + pr_info("IO link not available for non x86 platforms\n"); +#endif crat_table->num_domains++; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5f5b2acedbac..09da91644f9f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) * the GPU device is not already present in the topology device * list then return NULL. This means a new topology device has to * be created for this GPU. - * TODO: Rather than assiging @gpu to first topology device withtout - * gpu attached, it will better to have more stringent check. */ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) { @@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) struct kfd_topology_device *out_dev = NULL; down_write(&topology_lock); - list_for_each_entry(dev, &topology_device_list, list) + list_for_each_entry(dev, &topology_device_list, list) { + /* Discrete GPUs need their own topology device list + * entries. Don't assign them to CPU/APU nodes.
+ */ + if (!gpu->device_info->needs_iommu_device && + dev->node_props.cpu_cores_count) + continue; + if (!dev->gpu && (dev->node_props.simd_count > 0)) { dev->gpu = gpu; out_dev = dev; break; } + } up_write(&topology_lock); return out_dev; } @@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev) static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) { - const struct cpuinfo_x86 *cpuinfo; int first_cpu_of_numa_node; if (!cpumask || cpumask == cpu_none_mask) @@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) first_cpu_of_numa_node = cpumask_first(cpumask); if (first_cpu_of_numa_node >= nr_cpu_ids) return -1; - cpuinfo = &cpu_data(first_cpu_of_numa_node); - - return cpuinfo->apicid; +#ifdef CONFIG_X86_64 + return cpu_data(first_cpu_of_numa_node).apicid; +#else + return first_cpu_of_numa_node; +#endif } /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 34f35e9a3c46..f4fa40c387d3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1772,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) + caps.min_input_signal * 0x101; if (dc_link_set_backlight_level(dm->backlight_link, - brightness, 0, 0)) + brightness, 0)) return 0; else return 1; @@ -5933,7 +5933,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->color_mgmt_changed && - !new_crtc_state->vrr_enabled) + old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled) continue; if (!new_crtc_state->enable) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 52deacf39841..b0265dbebd4c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link) bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp, - const struct dc_stream_state *stream) + uint32_t frame_ramp) { struct dc *core_dc = link->ctx->dc; struct abm *abm = core_dc->res_pool->abm; @@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link, (abm->funcs->set_backlight_level_pwm == NULL)) return false; - if (stream) - ((struct dc_stream_state *)stream)->bl_pwm_level = - backlight_pwm_u16_16; - use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", @@ -2637,11 +2632,6 @@ void core_link_enable_stream( if (dc_is_dp_signal(pipe_ctx->stream->signal)) enable_stream_features(pipe_ctx); - - dc_link_set_backlight_level(pipe_ctx->stream->sink->link, - pipe_ctx->stream->bl_pwm_level, - 0, - pipe_ctx->stream); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 29f19d57ff7a..b2243e0dad1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ */ bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp, - const struct 
dc_stream_state *stream); + uint32_t frame_ramp); int dc_link_get_backlight_level(const struct dc_link *dc_link); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index be34d638e15d..d70c9e1cda3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -91,7 +91,6 @@ struct dc_stream_state { /* DMCU info */ unsigned int abm_level; - unsigned int bl_pwm_level; /* from core_stream struct */ struct dc_context *ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 4bf24758217f..8f09b8625c5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); - if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) + if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); /* un-mute audio */ @@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); if (pipe_ctx->stream_res.audio) { + struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; + if (option != KEEP_ACQUIRED_RESOURCE || !dc->debug.az_endpoint_mute_only) { /*only disable az_endpoint if power down or free*/ @@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); pipe_ctx->stream_res.audio = NULL; } + if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) + /*this is the first audio.
apply the PME w/a in order to wake AZ from D3*/ + pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); /* TODO: notify audio driver for if audio modes list changed * add audio mode list change flag */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index dcb3c5530236..cd1ebe57ed59 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -463,7 +463,7 @@ void dpp1_set_cursor_position( if (src_y_offset >= (int)param->viewport.height) cur_en = 0; /* not visible beyond bottom edge*/ - if (src_y_offset < 0) + if (src_y_offset + (int)height <= 0) cur_en = 0; /* not visible beyond top edge*/ REG_UPDATE(CURSOR0_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 345af015d061..d1acd7165bc8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position( if (src_y_offset >= (int)param->viewport.height) cur_en = 0; /* not visible beyond bottom edge*/ - if (src_y_offset < 0) //+ (int)hubp->curs_attr.height + if (src_y_offset + (int)hubp->curs_attr.height <= 0) cur_en = 0; /* not visible beyond top edge*/ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 91e015e14355..58a12ddf12f3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface( top_pipe_to_program->plane_state->update_flags.bits.full_update) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - + tg = pipe_ctx->stream_res.tg; /* Skip inactive pipes and ones already updated */ if (!pipe_ctx->stream || pipe_ctx->stream == stream - || !pipe_ctx->plane_state) + || !pipe_ctx->plane_state + || !tg->funcs->is_tg_enabled(tg)) continue; - pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); + tg->funcs->lock(tg); pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( pipe_ctx->plane_res.hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs); - } - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (!pipe_ctx->stream || pipe_ctx->stream == stream - || !pipe_ctx->plane_state) - continue; - - dcn10_pipe_control_lock(dc, pipe_ctx, false); - } + tg->funcs->unlock(tg); + } if (num_planes == 0) false_optc_underflow_wa(dc, stream, tg); diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 00f63b7dd32f..c11a443dcbc8 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le #define NUM_POWER_FN_SEGS 8 #define NUM_BL_CURVE_SEGS 16 +#pragma pack(push, 1) /* NOTE: iRAM is 256B in size */ struct iram_table_v_2 { /* flags */ @@ -100,6 +101,7 @@ struct iram_table_v_2 { uint8_t dummy8; /* 0xfe */ uint8_t dummy9; /* 0xff */ }; +#pragma pack(pop) static uint16_t backlight_8_to_16(unsigned int backlight_8bit) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 54364444ecd1..0c8212902275 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) return 0; } +static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr) +{ + uint32_t result; + + PP_ASSERT_WITH_CODE( + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0, + "[Run_ACG_BTC] Attempt to run ACG BTC failed!", + return -EINVAL); + + result = smum_get_argument(hwmgr); + PP_ASSERT_WITH_CODE(result == 1, + "Failed to run ACG BTC!", return -EINVAL); + + return 0; +} + static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = @@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "Failed to initialize SMC table!", result = tmp_result); + tmp_result = vega12_run_acg_btc(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to run ACG BTC!", + result = tmp_result); + result = vega12_enable_all_smu_features(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to enable all smu features!", diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index b5475c91e2ef..e9f343b124b0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 5af11cf1b482..e1675a00df12 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -41,7 +41,7 @@ struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt, const void *ops); void (*host_exit)(struct device *dev, void *gvt); int (*attach_vgpu)(void *vgpu, unsigned long *handle); - void (*detach_vgpu)(unsigned long handle); + void (*detach_vgpu)(void *vgpu); int (*inject_msi)(unsigned long handle, u32 addr, u16 data); unsigned long (*from_virt_to_mfn)(void *p); int (*enable_page_track)(unsigned long handle, u64 gfn); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c1072143da1d..dd3dfd00f4e6 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) { unsigned int index; u64 virtaddr; - unsigned long req_size, pgoff = 0; + unsigned long req_size, pgoff, req_start; pgprot_t pg_prot; struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); @@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) pg_prot = vma->vm_page_prot; virtaddr = vma->vm_start; req_size = vma->vm_end - vma->vm_start; - pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + req_start = pgoff << PAGE_SHIFT; + + if (!intel_vgpu_in_aperture(vgpu, req_start)) + return -EINVAL; + if (req_start + req_size > + vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) + return -EINVAL; + + pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); } @@ -1662,9 +1672,21 @@ 
static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) return 0; } -static void kvmgt_detach_vgpu(unsigned long handle) +static void kvmgt_detach_vgpu(void *p_vgpu) { - /* nothing to do here */ + int i; + struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; + + if (!vgpu->vdev.region) + return; + + for (i = 0; i < vgpu->vdev.num_regions; i++) + if (vgpu->vdev.region[i].ops->release) + vgpu->vdev.region[i].ops->release(vgpu, + &vgpu->vdev.region[i]); + vgpu->vdev.num_regions = 0; + kfree(vgpu->vdev.region); + vgpu->vdev.region = NULL; } static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 67f19992b226..3ed34123d8d1 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) if (!intel_gvt_host.mpt->detach_vgpu) return; - intel_gvt_host.mpt->detach_vgpu(vgpu->handle); + intel_gvt_host.mpt->detach_vgpu(vgpu); } #define MSI_CAP_CONTROL(offset) (offset + 2) diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 75d97f1b2e8f..4f5c67f70c4d 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -46,7 +46,6 @@ struct meson_crtc { struct drm_crtc base; struct drm_pending_vblank_event *event; struct meson_drm *priv; - bool enabled; }; #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) @@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = { }; -static void meson_crtc_enable(struct drm_crtc *crtc) +static void meson_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct drm_crtc_state *crtc_state = crtc->state; @@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc) drm_crtc_vblank_on(crtc); - meson_crtc->enabled = true; -} - -static void meson_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) -{ - struct meson_crtc *meson_crtc = to_meson_crtc(crtc); - struct meson_drm *priv = meson_crtc->priv; - - DRM_DEBUG_DRIVER("\n"); - - if (!meson_crtc->enabled) - meson_crtc_enable(crtc); - priv->viu.osd1_enabled = true; } @@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, crtc->state->event = NULL; } - - meson_crtc->enabled = false; } static void meson_crtc_atomic_begin(struct drm_crtc *crtc, @@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc, struct meson_crtc *meson_crtc = to_meson_crtc(crtc); unsigned long flags; - if (crtc->state->enable && !meson_crtc->enabled) - meson_crtc_enable(crtc); - if (crtc->state->event) { WARN_ON(drm_crtc_vblank_get(crtc) != 0); diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 3ee4d4a4ecba..12ff47b13668 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = { .fb_create = drm_gem_fb_create, }; +static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + static irqreturn_t meson_irq(int irq, void *arg) { struct drm_device *dev = arg; @@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) drm->mode_config.max_width = 3840; drm->mode_config.max_height = 2160; drm->mode_config.funcs = 
&meson_mode_config_funcs; + drm->mode_config.helper_private = &meson_mode_config_helpers; /* Hardware Initialization */ @@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev, remote_node = of_graph_get_remote_port_parent(ep); if (!remote_node || remote_node == parent || /* Ignore parent endpoint */ - !of_device_is_available(remote_node)) + !of_device_is_available(remote_node)) { + of_node_put(remote_node); continue; + } count += meson_probe_remote(pdev, match, remote, remote_node); @@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev) for_each_endpoint_of_node(np, ep) { remote = of_graph_get_remote_port_parent(ep); - if (!remote || !of_device_is_available(remote)) + if (!remote || !of_device_is_available(remote)) { + of_node_put(remote); continue; + } count += meson_probe_remote(pdev, &match, np, remote); + of_node_put(remote); } if (count && !match) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index bfbc9341e0c2..d9edb5785813 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2435,6 +2435,38 @@ nv140_chipset = { }; static const struct nvkm_device_chip +nv162_chipset = { + .name = "TU102", + .bar = tu104_bar_new, + .bios = nvkm_bios_new, + .bus = gf100_bus_new, + .devinit = tu104_devinit_new, + .fault = tu104_fault_new, + .fb = gv100_fb_new, + .fuse = gm107_fuse_new, + .gpio = gk104_gpio_new, + .i2c = gm200_i2c_new, + .ibus = gm200_ibus_new, + .imem = nv50_instmem_new, + .ltc = gp102_ltc_new, + .mc = tu104_mc_new, + .mmu = tu104_mmu_new, + .pci = gp100_pci_new, + .pmu = gp102_pmu_new, + .therm = gp100_therm_new, + .timer = gk20a_timer_new, + .top = gk104_top_new, + .ce[0] = tu104_ce_new, + .ce[1] = tu104_ce_new, + .ce[2] = tu104_ce_new, + .ce[3] = tu104_ce_new, + .ce[4] = tu104_ce_new, + .disp = tu104_disp_new, + .dma = gv100_dma_new, + .fifo = tu104_fifo_new, +}; + +static const struct nvkm_device_chip nv164_chipset = { .name = "TU104", .bar = tu104_bar_new, @@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x138: device->chip = &nv138_chipset; break; case 0x13b: device->chip = &nv13b_chipset; break; case 0x140: device->chip = &nv140_chipset; break; + case 0x162: device->chip = &nv162_chipset; break; case 0x164: device->chip = &nv164_chipset; break; case 0x166: device->chip = &nv166_chipset; break; default: diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 13c8a662f9b4..ccb090f3ab30 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = qxl_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = qxl_gem_prime_pin, .gem_prime_unpin = qxl_gem_prime_unpin, - .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, - .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, .gem_prime_vmap = qxl_gem_prime_vmap, .gem_prime_vunmap = qxl_gem_prime_vunmap, .gem_prime_mmap = qxl_gem_prime_mmap, diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index a55dece118b2..df65d3c1a7b8 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object 
*obj) WARN_ONCE(1, "not implemented"); } -struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - WARN_ONCE(1, "not implemented"); - return ERR_PTR(-ENOSYS); -} - -struct drm_gem_object *qxl_gem_prime_import_sg_table( - struct drm_device *dev, struct dma_buf_attachment *attach, - struct sg_table *table) -{ - WARN_ONCE(1, "not implemented"); - return ERR_PTR(-ENOSYS); -} - void *qxl_gem_prime_vmap(struct drm_gem_object *obj) { WARN_ONCE(1, "not implemented"); diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 96ac1458a59c..37f93022a106 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c @@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, child_count++; ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, &panel, &bridge); - if (!ret) + if (!ret) { + of_node_put(endpoint); break; + } } of_node_put(port); diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 9e9255ee59cd..a021bab11a4f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv, remote = of_graph_get_remote_port_parent(ep); if (!remote) continue; + of_node_put(remote); /* does this node match any registered engines? */ list_for_each_entry(frontend, &drv->frontend_list, list) { if (remote == frontend->node) { - of_node_put(remote); of_node_put(port); + of_node_put(ep); return frontend; } } } - + of_node_put(port); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index f7f32a885af7..2d1aaca49105 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -127,14 +127,10 @@ static struct drm_driver driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = virtgpu_gem_prime_pin, .gem_prime_unpin = virtgpu_gem_prime_unpin, - .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, - .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, .gem_prime_vmap = virtgpu_gem_prime_vmap, .gem_prime_vunmap = virtgpu_gem_prime_vunmap, .gem_prime_mmap = virtgpu_gem_prime_mmap, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 1deb41d42ea4..0c15000f926e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); /* virtgpu_prime.c */ int virtgpu_gem_prime_pin(struct drm_gem_object *obj); void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); -struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); -struct drm_gem_object *virtgpu_gem_prime_import_sg_table( - struct drm_device *dev, struct dma_buf_attachment *attach, - struct sg_table *sgt); void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 86ce0ae93f59..c59ec34c80a5 100644 --- 
a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) WARN_ONCE(1, "not implemented"); } -struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - WARN_ONCE(1, "not implemented"); - return ERR_PTR(-ENODEV); -} - -struct drm_gem_object *virtgpu_gem_prime_import_sg_table( - struct drm_device *dev, struct dma_buf_attachment *attach, - struct sg_table *table) -{ - WARN_ONCE(1, "not implemented"); - return ERR_PTR(-ENODEV); -} - void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) { struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index b677e5d524e6..d5f1d8e1c6f8 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig @@ -21,6 +21,7 @@ config VGA_SWITCHEROO bool "Laptop Hybrid Graphics - GPU switching support" depends on X86 depends on ACPI + depends on PCI select VGA_ARB help Many laptops released in 2008/9/10 have two GPUs with a multiplexer diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index 0e30fa00204c..f9b8e3e23a8e 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c @@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, } rv = lm80_read_value(client, LM80_REG_FANDIV); - if (rv < 0) + if (rv < 0) { + mutex_unlock(&data->update_lock); return rv; + } reg = (rv & ~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1))); lm80_write_value(client, LM80_REG_FANDIV, reg); diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c3040079b1cb..4adec4ab7d06 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -44,8 +44,8 @@ * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 * (0xd451) - * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3 - * (0xd459) + * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3 + * (0xd429) * * #temp lists the number of monitored temperature sources (first value) plus * the number of directly connectable temperature sensors (second value). 
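Two small but classic bug shapes run through the hwmon fixes in this stretch: the lm80 hunk above restores lock balance on an error return, and the occ hunks just below widen a 32-bit multiply before it can truncate. A reduced sketch of both rules, with hypothetical example_* names standing in for the real driver symbols:

#include <linux/types.h>
#include <linux/mutex.h>

struct example_data {
	struct mutex update_lock;
};

/* Stands in for lm80_read_value(); pretend the SMBus read failed. */
static int example_read_reg(struct example_data *data)
{
	return -EIO;
}

static int example_set_div(struct example_data *data)
{
	int rv;

	mutex_lock(&data->update_lock);
	rv = example_read_reg(data);
	if (rv < 0) {
		/* Every return taken after mutex_lock() must drop the lock,
		 * or the next caller deadlocks (the lm80 bug). */
		mutex_unlock(&data->update_lock);
		return rv;
	}
	/* ... modify and write the register back ... */
	mutex_unlock(&data->update_lock);
	return 0;
}

static u64 example_scaled(u32 tag, u32 sample_time_us)
{
	/* Cast before multiplying: u32 * u32 is evaluated in 32 bits, so
	 * assigning the product to a u64 cannot recover the lost high bits
	 * (the occ bug). */
	return (u64)tag * sample_time_us;
}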
@@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); #define SIO_NCT6795_ID 0xd350 #define SIO_NCT6796_ID 0xd420 #define SIO_NCT6797_ID 0xd450 -#define SIO_NCT6798_ID 0xd458 +#define SIO_NCT6798_ID 0xd428 #define SIO_ID_MASK 0xFFF8 enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; @@ -4508,7 +4508,8 @@ static int __maybe_unused nct6775_resume(struct device *dev) if (data->kind == nct6791 || data->kind == nct6792 || data->kind == nct6793 || data->kind == nct6795 || - data->kind == nct6796) + data->kind == nct6796 || data->kind == nct6797 || + data->kind == nct6798) nct6791_enable_io_mapping(sioreg); superio_exit(sioreg); @@ -4644,7 +4645,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || sio_data->kind == nct6793 || sio_data->kind == nct6795 || - sio_data->kind == nct6796) + sio_data->kind == nct6796 || sio_data->kind == nct6797 || + sio_data->kind == nct6798) nct6791_enable_io_mapping(sioaddr); superio_exit(sioaddr); diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index 423903f87955..391118c8aae8 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -380,8 +380,8 @@ static ssize_t occ_show_power_1(struct device *dev, val *= 1000000ULL; break; case 2: - val = get_unaligned_be32(&power->update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->update_tag) * + occ->powr_sample_time_us; break; case 3: val = get_unaligned_be16(&power->value) * 1000000ULL; @@ -425,8 +425,8 @@ static ssize_t occ_show_power_2(struct device *dev, &power->update_tag); break; case 2: - val = get_unaligned_be32(&power->update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->update_tag) * + occ->powr_sample_time_us; break; case 3: val = get_unaligned_be16(&power->value) * 1000000ULL; @@ -463,8 +463,8 @@ static ssize_t occ_show_power_a0(struct device *dev, &power->system.update_tag); break; case 2: - val = get_unaligned_be32(&power->system.update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->system.update_tag) * + occ->powr_sample_time_us; break; case 3: val = get_unaligned_be16(&power->system.value) * 1000000ULL; @@ -477,8 +477,8 @@ static ssize_t occ_show_power_a0(struct device *dev, &power->proc.update_tag); break; case 6: - val = get_unaligned_be32(&power->proc.update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->proc.update_tag) * + occ->powr_sample_time_us; break; case 7: val = get_unaligned_be16(&power->proc.value) * 1000000ULL; @@ -491,8 +491,8 @@ static ssize_t occ_show_power_a0(struct device *dev, &power->vdd.update_tag); break; case 10: - val = get_unaligned_be32(&power->vdd.update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->vdd.update_tag) * + occ->powr_sample_time_us; break; case 11: val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; @@ -505,8 +505,8 @@ static ssize_t occ_show_power_a0(struct device *dev, &power->vdn.update_tag); break; case 14: - val = get_unaligned_be32(&power->vdn.update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->vdn.update_tag) * + occ->powr_sample_time_us; break; case 15: val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index 8844c9565d2a..7053be59ad2e 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c @@ -88,7 +88,7 @@ static const 
struct of_device_id tmp421_of_match[] = { .data = (void *)2 }, { - .compatible = "ti,tmp422", + .compatible = "ti,tmp442", .data = (void *)3 }, { }, diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index b532e2c9cf5c..f8c00b94817f 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -901,9 +901,6 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); - if (!old_dyn_addr) - return 0; - master->addrs[data->index] = dev->info.dyn_addr; return 0; @@ -925,11 +922,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev) return -ENOMEM; data->index = pos; - master->addrs[pos] = dev->info.dyn_addr; + master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr; master->free_pos &= ~BIT(pos); i3c_dev_set_master_data(dev, data); - writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), + writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]), master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index bbd79b8b1a80..8889a4fdb454 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -1556,8 +1556,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) return PTR_ERR(master->pclk); master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); - if (IS_ERR(master->pclk)) - return PTR_ERR(master->pclk); + if (IS_ERR(master->sysclk)) + return PTR_ERR(master->sysclk); irq = platform_get_irq(pdev, 0); if (irq < 0) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 63a7cc00bae0..84f077b2b90a 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv, id_priv->id.route.addr.dev_addr.transport = rdma_node_get_transport(cma_dev->device->node_type); list_add_tail(&id_priv->list, &cma_dev->id_list); - rdma_restrack_kadd(&id_priv->res); + if (id_priv->res.kern_name) + rdma_restrack_kadd(&id_priv->res); + else + rdma_restrack_uadd(&id_priv->res); } static void cma_attach_to_dev(struct rdma_id_private *id_priv, diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index e600fc23ae62..3c97a8b6bf1e 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) goto err; - if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && - nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, - pd->unsafe_global_rkey)) - goto err; if (fill_res_name_pid(msg, res)) goto err; diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h index be6b8e1257d0..69f8db66925e 100644 --- a/drivers/infiniband/core/rdma_core.h +++ b/drivers/infiniband/core/rdma_core.h @@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj, enum uverbs_obj_access access, bool commit); +int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx); + void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 6b12cc5f97b2..3317300ab036 100644 --- 
a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp, { int ret; + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) + return uverbs_copy_to_struct_or_zero( + attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len); + if (copy_to_user(attrs->ucore.outbuf, resp, min(attrs->ucore.outlen, resp_len))) return -EFAULT; @@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs) goto out_put; } + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) + ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT); + ret = 0; out_put: @@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) return -ENOMEM; qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) + if (!qp) { + ret = -EINVAL; goto out; + } is_ud = qp->qp_type == IB_QPT_UD; sg_ind = 0; diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 8c81ff698052..0ca04d224015 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr, 0, uattr->len - len); } +static int uverbs_set_output(const struct uverbs_attr_bundle *bundle, + const struct uverbs_attr *attr) +{ + struct bundle_priv *pbundle = + container_of(bundle, struct bundle_priv, bundle); + u16 flags; + + flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | + UVERBS_ATTR_F_VALID_OUTPUT; + if (put_user(flags, + &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) + return -EFAULT; + return 0; +} + static int uverbs_process_idrs_array(struct bundle_priv *pbundle, const struct uverbs_api_attr *attr_uapi, struct uverbs_objs_arr_attr *attr, @@ -456,6 +471,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, } /* + * Until the drivers are revised to use the bundle directly we have to + * assume that the driver wrote to its UHW_OUT and flag userspace + * appropriately. + */ + if (!ret && pbundle->method_elm->has_udata) { + const struct uverbs_attr *attr = + uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT); + + if (!IS_ERR(attr)) + ret = uverbs_set_output(&pbundle->bundle, attr); + } + + /* * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can * not invoke the method because the request is not supported. No * other cases should return this code. @@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, const void *from, size_t size) { - struct bundle_priv *pbundle = - container_of(bundle, struct bundle_priv, bundle); const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); - u16 flags; size_t min_size; if (IS_ERR(attr)) @@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) return -EFAULT; - flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | - UVERBS_ATTR_F_VALID_OUTPUT; - if (put_user(flags, - &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) - return -EFAULT; - - return 0; + return uverbs_set_output(bundle, attr); } EXPORT_SYMBOL(uverbs_copy_to); + +/* + * This is only used if the caller has directly used copy_to_user to write the + * data. It signals to user space that the buffer is filled in.
+ */ +int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx) +{ + const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); + + if (IS_ERR(attr)) + return PTR_ERR(attr); + + return uverbs_set_output(bundle, attr); +} + int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, s64 lower_bound, u64 upper_bound, s64 *def_val) @@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, { const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); - if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), - attr->ptr_attr.len)) - return -EFAULT; + if (size < attr->ptr_attr.len) { + if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size, + attr->ptr_attr.len - size)) + return -EFAULT; + } return uverbs_copy_to(bundle, idx, from, size); } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index fb0007aa0c27..2890a77339e1 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -690,6 +690,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, buf += sizeof(hdr); + memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); bundle.ufile = file; if (!method_elm->is_ex) { size_t in_len = hdr.in_words * 4 - sizeof(hdr); diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 82cb6b71ac7c..e3e9dd54caa2 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, { struct mthca_ucontext *context; - qp = kmalloc(sizeof *qp, GFP_KERNEL); + qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); @@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, if (udata) return ERR_PTR(-EINVAL); - qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); + qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 42b8685c997e..3c633ab58052 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state) static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) { - return (enum pvrdma_wr_opcode)op; + switch (op) { + case IB_WR_RDMA_WRITE: + return PVRDMA_WR_RDMA_WRITE; + case IB_WR_RDMA_WRITE_WITH_IMM: + return PVRDMA_WR_RDMA_WRITE_WITH_IMM; + case IB_WR_SEND: + return PVRDMA_WR_SEND; + case IB_WR_SEND_WITH_IMM: + return PVRDMA_WR_SEND_WITH_IMM; + case IB_WR_RDMA_READ: + return PVRDMA_WR_RDMA_READ; + case IB_WR_ATOMIC_CMP_AND_SWP: + return PVRDMA_WR_ATOMIC_CMP_AND_SWP; + case IB_WR_ATOMIC_FETCH_AND_ADD: + return PVRDMA_WR_ATOMIC_FETCH_AND_ADD; + case IB_WR_LSO: + return PVRDMA_WR_LSO; + case IB_WR_SEND_WITH_INV: + return PVRDMA_WR_SEND_WITH_INV; + case IB_WR_RDMA_READ_WITH_INV: + return PVRDMA_WR_RDMA_READ_WITH_INV; + case IB_WR_LOCAL_INV: + return PVRDMA_WR_LOCAL_INV; + case IB_WR_REG_MR: + return PVRDMA_WR_FAST_REG_MR; + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: + return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP; + case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: + return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD; + case IB_WR_REG_SIG_MR: + return PVRDMA_WR_REG_SIG_MR; + default: + return PVRDMA_WR_ERROR; + } } static inline enum 
ib_wc_status pvrdma_wc_status_to_ib( diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 3acf74cbe266..1ec3646087ba 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) wqe_hdr->ex.imm_data = wr->ex.imm_data; + if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) { + *bad_wr = wr; + ret = -EINVAL; + goto out; + } + switch (qp->ibqp.qp_type) { case IB_QPT_GSI: case IB_QPT_UD: diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c index 4ac378e48902..40ca1e8fa09f 100644 --- a/drivers/isdn/hardware/avm/b1.c +++ b/drivers/isdn/hardware/avm/b1.c @@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo) int i, j; for (j = 0; j < AVM_MAXVERSION; j++) - cinfo->version[j] = "\0\0" + 1; + cinfo->version[j] = ""; for (i = 0, j = 0; j < AVM_MAXVERSION && i < cinfo->versionlen; j++, i += cinfo->versionbuf[i] + 1) diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index a2e74feee2b2..fd64df5a57a5 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip) /* Let the programs run for couple of ms and check the engine status */ usleep_range(3000, 6000); - lp55xx_read(chip, LP5523_REG_STATUS, &status); + ret = lp55xx_read(chip, LP5523_REG_STATUS, &status); + if (ret) + return ret; status &= LP5523_ENG_STATUS_MASK; if (status != LP5523_ENG_STATUS_MASK) { diff --git a/drivers/md/md.c b/drivers/md/md.c index fd4af4de03b4..05ffffb8b769 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -207,15 +207,10 @@ static bool create_on_open = true; struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev) { - struct bio *b; - if (!mddev || !bioset_initialized(&mddev->bio_set)) return bio_alloc(gfp_mask, nr_iovecs); - b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); - if (!b) - return NULL; - return b; + return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); } EXPORT_SYMBOL_GPL(bio_alloc_mddev); diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index d01821a6906a..89d9c4c21037 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -807,7 +807,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q) struct vb2_v4l2_buffer *vbuf; unsigned long flags; - cancel_delayed_work_sync(&dev->work_run); + if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx) + cancel_delayed_work_sync(&dev->work_run); + for (;;) { if (V4L2_TYPE_IS_OUTPUT(q->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 1441a73ce64c..90aad465f9ed 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -287,6 +287,7 @@ static void v4l_print_format(const void *arg, bool write_only) const struct v4l2_window *win; const struct v4l2_sdr_format *sdr; const struct v4l2_meta_format *meta; + u32 planes; unsigned i; pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); @@ -317,7 +318,8 @@ static void v4l_print_format(const void *arg, bool write_only) prt_names(mp->field, v4l2_field_names), mp->colorspace, mp->num_planes, mp->flags, mp->ycbcr_enc, mp->quantization, mp->xfer_func); - for (i = 0; i < mp->num_planes; i++) + planes = min_t(u32, mp->num_planes, 
VIDEO_MAX_PLANES); + for (i = 0; i < planes; i++) printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, mp->plane_fmt[i].bytesperline, mp->plane_fmt[i].sizeimage); @@ -1551,8 +1553,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) + break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], + bytesperline); return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) @@ -1581,8 +1586,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) + break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], + bytesperline); return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) @@ -1648,8 +1656,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) + break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], + bytesperline); return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) @@ -1678,8 +1689,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) + break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], + bytesperline); return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 6b212c8b78e7..2bfa3a903bf9 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -394,16 +394,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, struct _vop_vdev *vdev = to_vopvdev(dev); struct vop_device *vpdev = vdev->vpdev; struct mic_device_ctrl __iomem *dc = vdev->dc; - int i, err, retry; + int i, err, retry, queue_idx = 0; /* We must have this many virtqueues. */ if (nvqs > ioread8(&vdev->desc->num_vq)) return -ENOENT; for (i = 0; i < nvqs; ++i) { + if (!names[i]) { + vqs[i] = NULL; + continue; + } + dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", __func__, i, names[i]); - vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], + vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i], ctx ? 
ctx[i] : false); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index eebac35304c6..6e8edc9375dd 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -1322,7 +1322,7 @@ int denali_init(struct denali_nand_info *denali) } /* clk rate info is needed for setup_data_interface */ - if (denali->clk_rate && denali->clk_x_rate) + if (!denali->clk_rate || !denali->clk_x_rate) chip->options |= NAND_KEEP_TIMINGS; chip->legacy.dummy_controller.ops = &denali_controller_ops; diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 325b4414dccc..c9149a37f8f0 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -593,23 +593,6 @@ static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf, dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); } -/* fsmc_select_chip - assert or deassert nCE */ -static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert) -{ - u32 pc = readl(host->regs_va + FSMC_PC); - - if (!assert) - writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC); - else - writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC); - - /* - * nCE line changes must be applied before returning from this - * function. - */ - mb(); -} - /* * fsmc_exec_op - hook called by the core to execute NAND operations * @@ -627,8 +610,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); - fsmc_ce_ctrl(host, true); - for (op_id = 0; op_id < op->ninstrs; op_id++) { instr = &op->instrs[op_id]; @@ -686,8 +667,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, } } - fsmc_ce_ctrl(host, false); - return ret; } diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c index f92ae5aa2a54..9526d5b23c80 100644 --- a/drivers/mtd/nand/raw/jz4740_nand.c +++ b/drivers/mtd/nand/raw/jz4740_nand.c @@ -260,7 +260,7 @@ static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat, } static int jz_nand_ioremap_resource(struct platform_device *pdev, - const char *name, struct resource **res, void *__iomem *base) + const char *name, struct resource **res, void __iomem **base) { int ret; diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3b3f88ffab53..c05e4d50d43d 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) { struct can_priv *priv = netdev_priv(dev); - struct sk_buff *skb = priv->echo_skb[idx]; - struct canfd_frame *cf; if (idx >= priv->echo_skb_max) { netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", @@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 return NULL; } - if (!skb) { - netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", - __func__, idx); - return NULL; - } + if (priv->echo_skb[idx]) { + /* Using "struct canfd_frame::len" for the frame + * length is supported on both CAN and CANFD frames. + */ + struct sk_buff *skb = priv->echo_skb[idx]; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u8 len = cf->len; - /* Using "struct canfd_frame::len" for the frame - * length is supported on both CAN and CANFD frames. 
- */ - cf = (struct canfd_frame *)skb->data; - *len_ptr = cf->len; - priv->echo_skb[idx] = NULL; + *len_ptr = len; + priv->echo_skb[idx] = NULL; - return skb; + return skb; + } + + return NULL; } /* diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 0f36eafe3ac1..1c66fb2ad76b 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1106,7 +1106,7 @@ static int flexcan_chip_start(struct net_device *dev) } } else { /* clear and invalidate unused mailboxes first */ - for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) { + for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) { mb = flexcan_get_mb(priv, i); priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, &mb->can_ctrl); @@ -1432,7 +1432,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) gpr_np = of_find_node_by_phandle(phandle); if (!gpr_np) { dev_dbg(&pdev->dev, "could not find gpr node by phandle\n"); - return PTR_ERR(gpr_np); + return -ENODEV; } priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 02921d877c08..aa1d1f5339d2 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev) phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, priv->phy_iface); - if (IS_ERR(phydev)) + if (IS_ERR(phydev)) { netdev_err(dev, "Could not attach to PHY\n"); + phydev = NULL; + } } else { int ret; diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig index 809a155eb193..f6d244c663fd 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig @@ -9,8 +9,9 @@ config FSL_DPAA2_ETH config FSL_DPAA2_PTP_CLOCK tristate "Freescale DPAA2 PTP Clock" - depends on FSL_DPAA2_ETH && POSIX_TIMERS - select PTP_1588_CLOCK + depends on FSL_DPAA2_ETH + imply PTP_1588_CLOCK + default y help This driver adds support for using the DPAA2 1588 timer module as a PTP clock. 
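The v4l2-ioctl.c hunks earlier in this series all follow the same hardening pattern: a count supplied by user space (num_planes) is clamped, or the request rejected, before the count indexes a fixed-size array. A minimal standalone sketch of that pattern follows; the struct layout and the value of VIDEO_MAX_PLANES are illustrative stand-ins, not the real V4L2 definitions.

#include <stdio.h>

#define VIDEO_MAX_PLANES 8

struct plane { unsigned bytesperline, sizeimage; };

struct fmt {
	unsigned num_planes;                  /* attacker-controlled */
	struct plane plane_fmt[VIDEO_MAX_PLANES];
};

static void print_planes(const struct fmt *f)
{
	/* Clamp before the loop: num_planes comes from user space and
	 * may exceed the array bound, as in the v4l_print_format fix. */
	unsigned planes = f->num_planes < VIDEO_MAX_PLANES ?
			  f->num_planes : VIDEO_MAX_PLANES;

	for (unsigned i = 0; i < planes; i++)
		printf("plane %u: bytesperline=%u sizeimage=%u\n",
		       i, f->plane_fmt[i].bytesperline,
		       f->plane_fmt[i].sizeimage);
}

int main(void)
{
	struct fmt f = { .num_planes = 100 };  /* hostile value */

	print_planes(&f);                      /* prints at most 8 */
	return 0;
}

The s_fmt/try_fmt paths take the stricter option and reject oversized counts outright, which is the right choice when the value feeds further processing rather than just debug output.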
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ae0f88bce9aa..2370dc204202 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3467,7 +3467,7 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_clk_ipg; - fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); + fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { ret = regulator_enable(fep->reg_phy); if (ret) { diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 098d8764c0ea..dd71d5db7274 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1313,7 +1313,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) unsigned long lpar_rc; u16 mss = 0; -restart_poll: while (frames_processed < budget) { if (!ibmveth_rxq_pending_buffer(adapter)) break; @@ -1401,7 +1400,6 @@ restart_poll: napi_reschedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); - goto restart_poll; } } diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index db909b6069b5..65f8a4b6ed0c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -306,14 +306,16 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size) if (entries_per_copy < entries) { for (i = 0; i < entries / entries_per_copy; i++) { - err = copy_to_user(buf, init_ents, PAGE_SIZE); + err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ? + -EFAULT : 0; if (err) goto out; buf += PAGE_SIZE; } } else { - err = copy_to_user(buf, init_ents, entries * cqe_size); + err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ? 
+ -EFAULT : 0; } out: diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 7df728f1e5b5..6e501af0e532 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -2067,9 +2067,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, { struct mlx4_cmd_mailbox *mailbox; __be32 *outbox; + u64 qword_field; u32 dword_field; - int err; + u16 word_field; u8 byte_field; + int err; static const u8 a0_dmfs_query_hw_steering[] = { [0] = MLX4_STEERING_DMFS_A0_DEFAULT, [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, @@ -2097,19 +2099,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* QPC/EEC/CQC/EQC/RDMARC attributes */ - MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); - MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); - MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); - MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); - MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); - MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); - MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); - MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); - MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); - MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); - MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); - MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); - MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); + MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET); + param->qpc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET); + param->log_num_qps = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET); + param->srqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET); + param->log_num_srqs = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET); + param->cqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET); + param->log_num_cqs = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET); + param->altc_base = qword_field; + MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET); + param->auxc_base = qword_field; + MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET); + param->eqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET); + param->log_num_eqs = byte_field & 0x1f; + MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); + param->num_sys_eqs = word_field & 0xfff; + MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET); + param->rdmarc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET); + param->log_rd_per_qp = byte_field & 0x7; MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { @@ -2128,22 +2143,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* steering attributes */ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); - MLX4_GET(param->log_mc_entry_sz, outbox, - INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); - MLX4_GET(param->log_mc_table_sz, outbox, - INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); - MLX4_GET(byte_field, outbox, - INIT_HCA_FS_A0_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); + param->log_mc_entry_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, 
INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + param->log_mc_table_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET); param->dmfs_high_steer_mode = a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; } else { MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); - MLX4_GET(param->log_mc_entry_sz, outbox, - INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); - MLX4_GET(param->log_mc_hash_sz, outbox, - INIT_HCA_LOG_MC_HASH_SZ_OFFSET); - MLX4_GET(param->log_mc_table_sz, outbox, - INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + param->log_mc_entry_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + param->log_mc_hash_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + param->log_mc_table_sz = byte_field & 0x1f; } /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ @@ -2167,15 +2181,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* TPT attributes */ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); - MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); - MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET); + param->mw_enabled = byte_field >> 7; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + param->log_mpt_sz = byte_field & 0x3f; MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); /* UAR attributes */ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); - MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + param->log_uar_sz = byte_field & 0xf; /* phv_check enable */ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 24a90163775e..3b0955d34716 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -554,7 +554,6 @@ struct qed_hwfn { u8 dp_level; char name[NAME_SIZE]; - bool first_on_engine; bool hw_init_done; u8 num_funcs_on_engine; @@ -805,6 +804,9 @@ struct qed_dev { u32 mcp_nvm_resp; + /* Recovery */ + bool recov_in_prog; + /* Linux specific here */ struct qede_dev *edev; struct pci_dev *pdev; @@ -944,6 +946,7 @@ void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt); u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf); +void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn); void qed_get_protocol_stats(struct qed_dev *cdev, enum qed_mcp_protocol_type type, union qed_mcp_protocol_stats *stats); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 8f6551421945..b17003d9066c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1959,11 +1959,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 
1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); - /* Cleanup chip from previous driver if such remains exist */ - rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); - if (rc) - return rc; - /* Sanity check before the PF init sequence that uses DMAE */ rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); if (rc) @@ -2007,17 +2002,15 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, return rc; } -static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 enable) +int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool b_enable) { - u32 delay_idx = 0, val, set_val = enable ? 1 : 0; + u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0; - /* Change PF in PXP */ - qed_wr(p_hwfn, p_ptt, - PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); + /* Configure the PF's internal FID_enable for master transactions */ + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); - /* wait until value is set - try for 1 second every 50us */ + /* Wait until value is set - try for 1 second every 50us */ for (delay_idx = 0; delay_idx < 20000; delay_idx++) { val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); @@ -2071,13 +2064,19 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn, return 0; } +static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, + BIT(p_hwfn->abs_pf_id)); +} + int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) { struct qed_load_req_params load_req_params; u32 load_code, resp, param, drv_mb_param; bool b_default_mtu = true; struct qed_hwfn *p_hwfn; - int rc = 0, mfw_rc, i; + int rc = 0, i; u16 ether_type; if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { @@ -2092,7 +2091,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + p_hwfn = &cdev->hwfns[i]; /* If management didn't provide a default, set one of our own */ if (!p_hwfn->hw_info.mtu) { @@ -2105,9 +2104,6 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) continue; } - /* Enable DMAE in PXP */ - rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); - rc = qed_calc_hw_mode(p_hwfn); if (rc) return rc; @@ -2144,12 +2140,43 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) "Load request was sent. Load code: 0x%x\n", load_code); + /* Only relevant for recovery: + * Clear the indication after LOAD_REQ is responded by the MFW. + */ + cdev->recov_in_prog = false; + qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); - p_hwfn->first_on_engine = (load_code == - FW_MSG_CODE_DRV_LOAD_ENGINE); + /* Clean up chip from previous driver if such remains exist. + * This is not needed when the PF is the first one on the + * engine, since afterwards we are going to init the FW. + */ + if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) { + rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->rel_pf_id, false); + if (rc) { + DP_NOTICE(p_hwfn, "Final cleanup failed\n"); + goto load_err; + } + } + + /* Log and clear previous pglue_b errors if such exist */ + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); + + /* Enable the PF's internal FID_enable in the PXP */ + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, + true); + if (rc) + goto load_err; + + /* Clear the pglue_b was_error indication. 
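The reworked qed_hw_init() above leans on the kernel's goto-unwind idiom: once LOAD_REQ has taken the MFW load lock, every later failure funnels to a load_err label (added a little further down in this patch) that still sends LOAD_DONE, so the lock is released on all paths. A compressed standalone sketch of that control flow, with the firmware calls reduced to stubs and the failure simulated:

#include <stdio.h>

static int load_req(void)      { puts("LOAD_REQ sent");  return 0; }
static int load_done(void)     { puts("LOAD_DONE sent"); return 0; }
static int final_cleanup(void) { return -1; /* simulate a failure */ }

static int hw_init(void)
{
	int rc = load_req();          /* takes the MFW load lock */
	if (rc)
		return rc;

	rc = final_cleanup();
	if (rc)
		goto load_err;        /* any later failure funnels here */

	/* ...init phases, FID_enable, DCBX request would follow... */
	return load_done();

load_err:
	/* The MFW load lock must be released even when init fails. */
	load_done();
	return rc;
}

int main(void) { return hw_init() ? 1 : 0; }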
+ * In E4 it must be done after the BME and the internal + * FID_enable for the PF are set, since VDMs may cause the + * indication to be set again. + */ + qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: @@ -2180,39 +2207,29 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) break; } - if (rc) + if (rc) { DP_NOTICE(p_hwfn, "init phase failed for loadcode 0x%x (rc %d)\n", - load_code, rc); + load_code, rc); + goto load_err; + } - /* ACK mfw regardless of success or failure of initialization */ - mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, - DRV_MSG_CODE_LOAD_DONE, - 0, &load_code, ¶m); + rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); if (rc) return rc; - if (mfw_rc) { - DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n"); - return mfw_rc; - } - - /* Check if there is a DID mismatch between nvm-cfg/efuse */ - if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) - DP_NOTICE(p_hwfn, - "warning: device configuration is not supported on this board type. The device may not function as expected.\n"); /* send DCBX attention request command */ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "sending phony dcbx set command to trigger DCBx attention handling\n"); - mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, - DRV_MSG_CODE_SET_DCBX, - 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, - &load_code, ¶m); - if (mfw_rc) { + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, + &resp, ¶m); + if (rc) { DP_NOTICE(p_hwfn, "Failed to send DCBX attention request\n"); - return mfw_rc; + return rc; } p_hwfn->hw_init_done = true; @@ -2261,6 +2278,12 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } return 0; + +load_err: + /* The MFW load lock should be released also when initialization fails. + */ + qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); + return rc; } #define QED_HW_STOP_RETRY_LIMIT (10) @@ -2273,6 +2296,9 @@ static void qed_hw_timers_stop(struct qed_dev *cdev, qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); + if (cdev->recov_in_prog) + return; + for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { if ((!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN)) && @@ -2335,12 +2361,14 @@ int qed_hw_stop(struct qed_dev *cdev) p_hwfn->hw_init_done = false; /* Send unload command to MCP */ - rc = qed_mcp_unload_req(p_hwfn, p_ptt); - if (rc) { - DP_NOTICE(p_hwfn, - "Failed sending a UNLOAD_REQ command. rc = %d.\n", - rc); - rc2 = -EINVAL; + if (!cdev->recov_in_prog) { + rc = qed_mcp_unload_req(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed sending a UNLOAD_REQ command. rc = %d.\n", + rc); + rc2 = -EINVAL; + } } qed_slowpath_irq_sync(p_hwfn); @@ -2382,27 +2410,31 @@ int qed_hw_stop(struct qed_dev *cdev) qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); - qed_mcp_unload_done(p_hwfn, p_ptt); - if (rc) { - DP_NOTICE(p_hwfn, - "Failed sending a UNLOAD_DONE command. rc = %d.\n", - rc); - rc2 = -EINVAL; + if (!cdev->recov_in_prog) { + rc = qed_mcp_unload_done(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed sending a UNLOAD_DONE command. 
rc = %d.\n", + rc); + rc2 = -EINVAL; + } } } - if (IS_PF(cdev)) { + if (IS_PF(cdev) && !cdev->recov_in_prog) { p_hwfn = QED_LEADING_HWFN(cdev); p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; - /* Disable DMAE in PXP - in CMT, this should only be done for - * first hw-function, and only after all transactions have - * stopped for all active hw-functions. + /* Clear the PF's internal FID_enable in the PXP. + * In CMT this should only be done for first hw-function, and + * only after all transactions have stopped for all active + * hw-functions. */ - rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false); + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); if (rc) { DP_NOTICE(p_hwfn, - "qed_change_pci_hwfn failed. rc = %d.\n", rc); + "qed_pglueb_set_pfid_enable() failed. rc = %d.\n", + rc); rc2 = -EINVAL; } } @@ -2502,9 +2534,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); } - /* Clean Previous errors if such exist */ - qed_wr(p_hwfn, p_hwfn->p_main_ptt, - PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id); + /* Clean previous pglue_b errors if such exist */ + qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); /* enable internal target-read */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, @@ -3440,6 +3471,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_doorbells, enum qed_pci_personality personality) { + struct qed_dev *cdev = p_hwfn->cdev; int rc = 0; /* Split PCI bars evenly between hwfns */ @@ -3492,7 +3524,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, /* Sending a mailbox to the MFW should be done after qed_get_hw_info() * is called as it sets the ports number in an engine. */ - if (IS_LEAD_HWFN(p_hwfn)) { + if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) { rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); if (rc) DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index acccd85170aa..e4b4e3b78e8a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -473,6 +473,18 @@ int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); /** + * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER + * + * @param p_hwfn + * @param p_ptt + * @param b_enable - true/false + * + * @return int + */ +int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool b_enable); + +/** * @brief db_recovery_add - add doorbell information to the doorbell * recovery mechanism. 
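qed_pglueb_set_pfid_enable(), declared just above, is implemented earlier in this diff as a write followed by a bounded poll: the register is re-read every 50us for up to one second until the written value sticks. The same bounded-polling skeleton as a hedged standalone sketch, with a plain variable standing in for the PXP register:

#include <stdio.h>
#include <unistd.h>

static unsigned fake_reg;                 /* stands in for the PXP register */
static unsigned reg_rd(void)       { return fake_reg; }
static void reg_wr(unsigned v)     { fake_reg = v; }

static int set_pfid_enable(int enable)
{
	unsigned set_val = enable ? 1 : 0;

	reg_wr(set_val);

	/* Wait until value is set - try for 1 second every 50us */
	for (int delay_idx = 0; delay_idx < 20000; delay_idx++) {
		if (reg_rd() == set_val)
			return 0;
		usleep(50);
	}
	return -1;                        /* timed out */
}

int main(void)
{
	return set_pfid_enable(1) ? 1 : 0;
}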
* diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b13cfb449d8f..417121e74ee9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12827,7 +12827,7 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LLDP_DATA_UPDATED, MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, - MFW_DRV_MSG_RESERVED4, + MFW_DRV_MSG_ERROR_RECOVERY, MFW_DRV_MSG_BW_UPDATE, MFW_DRV_MSG_S_TAG_UPDATE, MFW_DRV_MSG_GET_LAN_STATS, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 70504dcf4087..72ec1c6bdf70 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -703,6 +703,17 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, int qed_status = 0; u32 offset = 0; + if (p_hwfn->cdev->recov_in_prog) { + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n", + src_addr, src_type, dst_addr, dst_type, + size_in_dwords); + + /* Let the flow complete w/o any error handling */ + return 0; + } + qed_dmae_opcode(p_hwfn, (src_type == QED_DMAE_ADDRESS_GRC), (dst_type == QED_DMAE_ADDRESS_GRC), diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 92340919d852..e23980e301b6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -255,112 +255,114 @@ out: #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) #define PGLUE_ATTENTION_ILT_VALID (1 << 23) -static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn) + +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) { u32 tmp; - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_TX_ERR_WR_DETAILS2); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); if (tmp & PGLUE_ATTENTION_VALID) { u32 addr_lo, addr_hi, details; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_ADD_63_32); - details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - DP_INFO(p_hwfn, - "Illegal write by chip to [%08x:%08x] blocked.\n" - "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); + DP_NOTICE(p_hwfn, + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, + tmp, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_BME) ? 
1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); } - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_TX_ERR_RD_DETAILS2); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); if (tmp & PGLUE_ATTENTION_RD_VALID) { u32 addr_lo, addr_hi, details; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_ADD_63_32); - details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS); - DP_INFO(p_hwfn, - "Illegal read by chip from [%08x:%08x] blocked.\n" - " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 - : 0, - GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 - : 0); + DP_NOTICE(p_hwfn, + "Illegal read by chip from [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, + tmp, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0); } - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); if (tmp & PGLUE_ATTENTION_ICPL_VALID) - DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp); + DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); if (tmp & PGLUE_ATTENTION_ZLR_VALID) { u32 addr_hi, addr_lo; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); - DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n", - tmp, addr_hi, addr_lo); + DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n", + tmp, addr_hi, addr_lo); } - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_VF_ILT_ERR_DETAILS2); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); if (tmp & PGLUE_ATTENTION_ILT_VALID) { u32 addr_hi, addr_lo, details; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); - details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS); - DP_INFO(p_hwfn, - "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", - details, tmp, addr_hi, addr_lo); + DP_NOTICE(p_hwfn, + "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", + details, tmp, addr_hi, addr_lo); } /* Clear the indications */ - qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2)); return 0; } +static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) +{ + return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); +} + #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) #define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) #define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) @@ -540,7 +542,7 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { {"PGLUE misc_flr", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"PGLUE B RBC", ATTENTION_PAR_INT, - qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B}, + qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, {"PGLUE misc_mctp", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index d81a62ebd524..1f356ed4f761 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -431,4 +431,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 6adf5bda9811..b47352643fb5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -359,6 +359,8 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, qed_init_dp(cdev, params->dp_module, params->dp_level); + cdev->recov_in_prog = params->recov_in_prog; + rc = qed_init_pci(cdev, pdev); if (rc) { DP_ERR(cdev, "init pci failed\n"); @@ -2203,6 +2205,15 @@ static int 
qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, return qed_mcp_get_nvm_image(hwfn, type, buf, len); } +void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) +{ + struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; + void *cookie = p_hwfn->cdev->ops_cookie; + + if (ops && ops->schedule_recovery_handler) + ops->schedule_recovery_handler(cookie); +} + static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, void *handle) { @@ -2226,6 +2237,23 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) return status; } +static int qed_recovery_process(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + int rc = 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + rc = qed_start_recovery_process(p_hwfn, p_ptt); + + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + static int qed_update_wol(struct qed_dev *cdev, bool enabled) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); @@ -2380,6 +2408,8 @@ const struct qed_common_ops qed_common_ops_pass = { .nvm_get_image = &qed_nvm_get_image, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, + .recovery_process = &qed_recovery_process, + .recovery_prolog = &qed_recovery_prolog, .update_drv_state = &qed_update_drv_state, .update_mac = &qed_update_mac, .update_mtu = &qed_update_mtu, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index e7f18e34ff0d..bb8541847aa5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1070,6 +1070,27 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp, + ¶m); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send a LOAD_DONE command, rc = %d\n", rc); + return rc; + } + + /* Check if there is a DID mismatch between nvm-cfg/efuse */ + if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) + DP_NOTICE(p_hwfn, + "warning: device configuration is not supported on this board type. The device may not function as expected.\n"); + + return 0; +} + int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_mb_params mb_params; @@ -1528,6 +1549,60 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) return 0; } +u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr); + path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn)); + + proc_kill_cnt = qed_rd(p_hwfn, p_ptt, + path_addr + + offsetof(struct public_path, process_kill)) & + PROCESS_KILL_COUNTER_MASK; + + return proc_kill_cnt; +} + +static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u32 proc_kill_cnt; + + /* Prevent possible attentions/interrupts during the recovery handling + * and till its load phase, during which they will be re-enabled. 
+ */ + qed_int_igu_disable_int(p_hwfn, p_ptt); + + DP_NOTICE(p_hwfn, "Received a process kill indication\n"); + + /* The following operations should be done once, and thus in CMT mode + * are carried out by only the first HW function. + */ + if (p_hwfn != QED_LEADING_HWFN(cdev)) + return; + + if (cdev->recov_in_prog) { + DP_NOTICE(p_hwfn, + "Ignoring the indication since a recovery process is already in progress\n"); + return; + } + + cdev->recov_in_prog = true; + + proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt); + DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt); + + qed_schedule_recovery_handler(p_hwfn); +} + static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum MFW_DRV_MSG_TYPE type) @@ -1758,6 +1833,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_ERROR_RECOVERY: + qed_mcp_handle_process_kill(p_hwfn, p_ptt); + break; case MFW_DRV_MSG_GET_LAN_STATS: case MFW_DRV_MSG_GET_FCOE_STATS: case MFW_DRV_MSG_GET_ISCSI_STATS: @@ -2303,6 +2381,43 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, return 0; } +int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + + if (cdev->recov_in_prog) { + DP_NOTICE(p_hwfn, + "Avoid triggering a recovery since such a process is already in progress\n"); + return -EAGAIN; + } + + DP_NOTICE(p_hwfn, "Triggering a recovery process\n"); + qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); + + return 0; +} + +#define QED_RECOVERY_PROLOG_SLEEP_MS 100 + +int qed_recovery_prolog(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + int rc; + + /* Allow ongoing PCIe transactions to complete */ + msleep(QED_RECOVERY_PROLOG_SLEEP_MS); + + /* Clear the PF's internal FID_enable in the PXP */ + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); + if (rc) + DP_NOTICE(p_hwfn, + "qed_pglueb_set_pfid_enable() failed. rc = %d.\n", + rc); + + return rc; +} + static int qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index eddf67798d6f..6e1d72a669ae 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -441,6 +441,38 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_mcp_drv_version *p_ver); /** + * @brief Read the MFW process kill counter + * + * @param p_hwfn + * @param p_ptt + * + * @return u32 + */ +u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief Trigger a recovery process + * + * @param p_hwfn + * @param p_ptt + * + * @return int + */ +int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * @brief A recovery handler must call this function as its first step. + * It is assumed that the handler is not run from an interrupt context. 
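Several hunks in this series gate hardware access on cdev->recov_in_prog: the process-kill handler sets it exactly once, and until the re-probe clears it, DMAE transactions, SPQ posts and SR-IOV teardown all complete as no-ops instead of touching dead hardware. A condensed single-threaded sketch of that guard pattern (the real driver additionally disables interrupts and restricts the once-only work to the leading hwfn in CMT):

#include <stdbool.h>
#include <stdio.h>

static bool recov_in_prog;

static int dmae_execute(void)
{
	if (recov_in_prog) {
		/* Let the flow complete w/o any error handling */
		puts("recovery in progress, skipping DMAE");
		return 0;
	}
	puts("DMAE transaction issued");
	return 0;
}

static void handle_process_kill(void)
{
	if (recov_in_prog)
		return;            /* a recovery is already in progress */
	recov_in_prog = true;
	puts("scheduling recovery handler");
}

int main(void)
{
	dmae_execute();            /* normal path */
	handle_process_kill();
	dmae_execute();            /* silently skipped during recovery */
	return 0;
}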
+ * + * @param cdev + * @param p_ptt + * + * @return int + */ +int qed_recovery_prolog(struct qed_dev *cdev); + +/** * @brief Notify MFW about the change in base device properties * * @param p_hwfn @@ -801,6 +833,16 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_load_req_params *p_params); /** + * @brief Sends a LOAD_DONE message to the MFW + * + * @param p_hwfn + * @param p_ptt + * + * @return int - 0 - Operation was successful. + */ +int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** * @brief Sends a UNLOAD_REQ message to the MFW * * @param p_hwfn diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 8939ed6e08b7..5ce825ca5f24 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -518,6 +518,8 @@ 0x180824UL #define MISC_REG_AEU_GENERAL_ATTN_0 \ 0x008400UL +#define MISC_REG_AEU_GENERAL_ATTN_35 \ + 0x00848cUL #define CAU_REG_SB_ADDR_MEMORY \ 0x1c8000UL #define CAU_REG_SB_VAR_MEMORY \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index eb88bbc6b193..3e0f7c46bb1b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -790,6 +790,17 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) SPQ_HIGH_PRI_RESERVE_DEFAULT); } +static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent, + u8 *fw_return_code) +{ + if (!fw_return_code) + return; + + if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE || + p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP) + *fw_return_code = RDMA_RETURN_OK; +} + /* Avoid overriding of SPQ entries when getting out-of-order completions, by * marking the completions in a bitmap and increasing the chain consumer only * for the first successive completed entries. @@ -825,6 +836,17 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, return -EINVAL; } + if (p_hwfn->cdev->recov_in_prog) { + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Recovery is in progress. 
Skip spq post [cmd %02x protocol %02x]\n", + p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id); + + /* Let the flow complete w/o any error handling */ + qed_spq_recov_set_ret_code(p_ent, fw_return_code); + return 0; + } + /* Complete the entry */ rc = qed_spq_fill_entry(p_hwfn, p_ent); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index ca6290fa0f30..71e28be58102 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4447,6 +4447,13 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) pci_disable_sriov(cdev->pdev); + if (cdev->recov_in_prog) { + DP_VERBOSE(cdev, + QED_MSG_IOV, + "Skip SRIOV disable operations in the device since a recovery is in progress\n"); + goto out; + } + for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; struct qed_ptt *ptt = qed_ptt_acquire(hwfn); @@ -4486,7 +4493,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) qed_ptt_release(hwfn, ptt); } - +out: qed_iov_set_vfs_to_disable(cdev, false); return 0; diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 613249d1e967..843416404aeb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -162,6 +162,7 @@ struct qede_rdma_dev { struct list_head entry; struct list_head rdma_event_list; struct workqueue_struct *rdma_wq; + bool exp_recovery; }; struct qede_ptp; @@ -264,6 +265,7 @@ struct qede_dev { enum QEDE_STATE { QEDE_STATE_CLOSED, QEDE_STATE_OPEN, + QEDE_STATE_RECOVERY, }; #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) @@ -462,6 +464,7 @@ struct qede_fastpath { #define QEDE_CSUM_UNNECESSARY BIT(1) #define QEDE_TUNN_CSUM_UNNECESSARY BIT(2) +#define QEDE_SP_RECOVERY 0 #define QEDE_SP_RX_MODE 1 #ifdef CONFIG_RFS_ACCEL diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 5a74fcbdbc2b..de955f2b2980 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -133,23 +133,12 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void qede_remove(struct pci_dev *pdev); static void qede_shutdown(struct pci_dev *pdev); static void qede_link_update(void *dev, struct qed_link_output *link); +static void qede_schedule_recovery_handler(void *dev); +static void qede_recovery_handler(struct qede_dev *edev); static void qede_get_eth_tlv_data(void *edev, void *data); static void qede_get_generic_tlv_data(void *edev, struct qed_generic_tlvs *data); -/* The qede lock is used to protect driver state change and driver flows that - * are not reentrant. 
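The relocated lock comment and the qede_lock()/qede_unlock() helpers added just below encode a fixed lock order: RTNL first, then the driver's private mutex, so the recovery path can exclude both netdev ops and internal driver flows without deadlocking against them. A toy illustration with two pthread mutexes standing in for rtnl_lock() and the qede mutex (names mirror the driver, bodies are stand-ins):

#include <pthread.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER; /* rtnl_lock() stand-in */
static pthread_mutex_t qede = PTHREAD_MUTEX_INITIALIZER; /* the private qede lock */

static void __qede_lock(void)   { pthread_mutex_lock(&qede); }
static void __qede_unlock(void) { pthread_mutex_unlock(&qede); }

/* Always take RTNL before the private mutex, and release in the
 * reverse order - mixing the order between callers invites deadlock. */
static void qede_lock(void)
{
	pthread_mutex_lock(&rtnl);
	__qede_lock();
}

static void qede_unlock(void)
{
	__qede_unlock();
	pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	qede_lock();
	/* ...recovery work that must exclude ndo callbacks... */
	qede_unlock();
	return 0;
}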
- */ -void __qede_lock(struct qede_dev *edev) -{ - mutex_lock(&edev->qede_lock); -} - -void __qede_unlock(struct qede_dev *edev) -{ - mutex_unlock(&edev->qede_lock); -} - #ifdef CONFIG_QED_SRIOV static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) @@ -231,6 +220,7 @@ static struct qed_eth_cb_ops qede_ll_ops = { .arfs_filter_op = qede_arfs_filter_op, #endif .link_update = qede_link_update, + .schedule_recovery_handler = qede_schedule_recovery_handler, .get_generic_tlv_data = qede_get_generic_tlv_data, .get_protocol_tlv_data = qede_get_eth_tlv_data, }, @@ -950,11 +940,57 @@ err: return -ENOMEM; } +/* The qede lock is used to protect driver state change and driver flows that + * are not reentrant. + */ +void __qede_lock(struct qede_dev *edev) +{ + mutex_lock(&edev->qede_lock); +} + +void __qede_unlock(struct qede_dev *edev) +{ + mutex_unlock(&edev->qede_lock); +} + +/* This version of the lock should be used when acquiring the RTNL lock is also + * needed in addition to the internal qede lock. + */ +void qede_lock(struct qede_dev *edev) +{ + rtnl_lock(); + __qede_lock(edev); +} + +void qede_unlock(struct qede_dev *edev) +{ + __qede_unlock(edev); + rtnl_unlock(); +} + static void qede_sp_task(struct work_struct *work) { struct qede_dev *edev = container_of(work, struct qede_dev, sp_task.work); + /* The locking scheme depends on the specific flag: + * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to + * ensure that ongoing flows are ended and new ones are not started. + * In other cases - only the internal qede lock should be acquired. + */ + + if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) { +#ifdef CONFIG_QED_SRIOV + /* SRIOV must be disabled outside the lock to avoid a deadlock. + * The recovery of the active VFs is currently not supported. + */ + qede_sriov_configure(edev->pdev, 0); +#endif + qede_lock(edev); + qede_recovery_handler(edev); + qede_unlock(edev); + } + __qede_lock(edev); if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) @@ -1031,8 +1067,13 @@ static void qede_log_probe(struct qede_dev *edev) enum qede_probe_mode { QEDE_PROBE_NORMAL, + QEDE_PROBE_RECOVERY, }; +#define QEDE_RDMA_PROBE_MODE(mode) \ + ((mode) == QEDE_PROBE_NORMAL ? 
QEDE_RDMA_PROBE_NORMAL \ + : QEDE_RDMA_PROBE_RECOVERY) + static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, bool is_vf, enum qede_probe_mode mode) { @@ -1051,6 +1092,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, probe_params.dp_module = dp_module; probe_params.dp_level = dp_level; probe_params.is_vf = is_vf; + probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY); cdev = qed_ops->common->probe(pdev, &probe_params); if (!cdev) { rc = -ENODEV; @@ -1078,11 +1120,20 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, if (rc) goto err2; - edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, - dp_level); - if (!edev) { - rc = -ENOMEM; - goto err2; + if (mode != QEDE_PROBE_RECOVERY) { + edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, + dp_level); + if (!edev) { + rc = -ENOMEM; + goto err2; + } + } else { + struct net_device *ndev = pci_get_drvdata(pdev); + + edev = netdev_priv(ndev); + edev->cdev = cdev; + memset(&edev->stats, 0, sizeof(edev->stats)); + memcpy(&edev->dev_info, &dev_info, sizeof(dev_info)); } if (is_vf) @@ -1090,28 +1141,31 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, qede_init_ndev(edev); - rc = qede_rdma_dev_add(edev); + rc = qede_rdma_dev_add(edev, QEDE_RDMA_PROBE_MODE(mode)); if (rc) goto err3; - /* Prepare the lock prior to the registration of the netdev, - * as once it's registered we might reach flows requiring it - * [it's even possible to reach a flow needing it directly - * from there, although it's unlikely]. - */ - INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); - mutex_init(&edev->qede_lock); - rc = register_netdev(edev->ndev); - if (rc) { - DP_NOTICE(edev, "Cannot register net-device\n"); - goto err4; + if (mode != QEDE_PROBE_RECOVERY) { + /* Prepare the lock prior to the registration of the netdev, + * as once it's registered we might reach flows requiring it + * [it's even possible to reach a flow needing it directly + * from there, although it's unlikely]. + */ + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); + mutex_init(&edev->qede_lock); + + rc = register_netdev(edev->ndev); + if (rc) { + DP_NOTICE(edev, "Cannot register net-device\n"); + goto err4; + } } edev->ops->common->set_name(cdev, edev->ndev->name); /* PTP not supported on VFs */ if (!is_vf) - qede_ptp_enable(edev, true); + qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL)); edev->ops->register_ops(cdev, &qede_ll_ops, edev); @@ -1126,7 +1180,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, return 0; err4: - qede_rdma_dev_remove(edev); + qede_rdma_dev_remove(edev, QEDE_RDMA_PROBE_MODE(mode)); err3: free_netdev(edev->ndev); err2: @@ -1162,8 +1216,13 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) enum qede_remove_mode { QEDE_REMOVE_NORMAL, + QEDE_REMOVE_RECOVERY, }; +#define QEDE_RDMA_REMOVE_MODE(mode) \ + ((mode) == QEDE_REMOVE_NORMAL ? 
QEDE_RDMA_REMOVE_NORMAL \ + : QEDE_RDMA_REMOVE_RECOVERY) + static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) { struct net_device *ndev = pci_get_drvdata(pdev); @@ -1172,15 +1231,19 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) DP_INFO(edev, "Starting qede_remove\n"); - qede_rdma_dev_remove(edev); - unregister_netdev(ndev); - cancel_delayed_work_sync(&edev->sp_task); + qede_rdma_dev_remove(edev, QEDE_RDMA_REMOVE_MODE(mode)); - qede_ptp_disable(edev); + if (mode != QEDE_REMOVE_RECOVERY) { + unregister_netdev(ndev); - edev->ops->common->set_power_state(cdev, PCI_D0); + cancel_delayed_work_sync(&edev->sp_task); - pci_set_drvdata(pdev, NULL); + edev->ops->common->set_power_state(cdev, PCI_D0); + + pci_set_drvdata(pdev, NULL); + } + + qede_ptp_disable(edev); /* Use global ops since we've freed edev */ qed_ops->common->slowpath_stop(cdev); @@ -1194,7 +1257,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) * [e.g., QED register callbacks] won't break anything when * accessing the netdevice. */ - free_netdev(ndev); + if (mode != QEDE_REMOVE_RECOVERY) + free_netdev(ndev); dev_info(&pdev->dev, "Ending qede_remove successfully\n"); } @@ -1539,6 +1603,58 @@ static int qede_alloc_mem_load(struct qede_dev *edev) return 0; } +static void qede_empty_tx_queue(struct qede_dev *edev, + struct qede_tx_queue *txq) +{ + unsigned int pkts_compl = 0, bytes_compl = 0; + struct netdev_queue *netdev_txq; + int rc, len = 0; + + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); + + while (qed_chain_get_cons_idx(&txq->tx_pbl) != + qed_chain_get_prod_idx(&txq->tx_pbl)) { + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n", + txq->index, qed_chain_get_cons_idx(&txq->tx_pbl), + qed_chain_get_prod_idx(&txq->tx_pbl)); + + rc = qede_free_tx_pkt(edev, txq, &len); + if (rc) { + DP_NOTICE(edev, + "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n", + txq->index, + qed_chain_get_cons_idx(&txq->tx_pbl), + qed_chain_get_prod_idx(&txq->tx_pbl)); + break; + } + + bytes_compl += len; + pkts_compl++; + txq->sw_tx_cons++; + } + + netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); +} + +static void qede_empty_tx_queues(struct qede_dev *edev) +{ + int i; + + for_each_queue(i) + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + int cos; + + for_each_cos_in_txq(edev, cos) { + struct qede_fastpath *fp; + + fp = &edev->fp_array[i]; + qede_empty_tx_queue(edev, + &fp->txq[cos]); + } + } +} + /* This function inits fp content and resets the SB, RXQ and TXQ structures */ static void qede_init_fp(struct qede_dev *edev) { @@ -2053,6 +2169,7 @@ out: enum qede_unload_mode { QEDE_UNLOAD_NORMAL, + QEDE_UNLOAD_RECOVERY, }; static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, @@ -2068,7 +2185,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); - edev->state = QEDE_STATE_CLOSED; + if (mode != QEDE_UNLOAD_RECOVERY) + edev->state = QEDE_STATE_CLOSED; qede_rdma_dev_event_close(edev); @@ -2076,17 +2194,20 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); - /* Reset the link */ - memset(&link_params, 0, sizeof(link_params)); - link_params.link_up = false; - edev->ops->common->set_link(edev->cdev, &link_params); - rc = qede_stop_queues(edev); - if (rc) { - 
qede_sync_free_irqs(edev); - goto out; - } + if (mode != QEDE_UNLOAD_RECOVERY) { + /* Reset the link */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = false; + edev->ops->common->set_link(edev->cdev, &link_params); - DP_INFO(edev, "Stopped Queues\n"); + rc = qede_stop_queues(edev); + if (rc) { + qede_sync_free_irqs(edev); + goto out; + } + + DP_INFO(edev, "Stopped Queues\n"); + } qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); @@ -2102,18 +2223,26 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_napi_disable_remove(edev); + if (mode == QEDE_UNLOAD_RECOVERY) + qede_empty_tx_queues(edev); + qede_free_mem_load(edev); qede_free_fp_array(edev); out: if (!is_locked) __qede_unlock(edev); + + if (mode != QEDE_UNLOAD_RECOVERY) + DP_NOTICE(edev, "Link is down\n"); + DP_INFO(edev, "Ending qede unload\n"); } enum qede_load_mode { QEDE_LOAD_NORMAL, QEDE_LOAD_RELOAD, + QEDE_LOAD_RECOVERY, }; static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, @@ -2293,6 +2422,77 @@ static void qede_link_update(void *dev, struct qed_link_output *link) } } +static void qede_schedule_recovery_handler(void *dev) +{ + struct qede_dev *edev = dev; + + if (edev->state == QEDE_STATE_RECOVERY) { + DP_NOTICE(edev, + "Avoid scheduling a recovery handling since already in recovery state\n"); + return; + } + + set_bit(QEDE_SP_RECOVERY, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); + + DP_INFO(edev, "Scheduled a recovery handler\n"); +} + +static void qede_recovery_failed(struct qede_dev *edev) +{ + netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n"); + + netif_device_detach(edev->ndev); + + if (edev->cdev) + edev->ops->common->set_power_state(edev->cdev, PCI_D3hot); +} + +static void qede_recovery_handler(struct qede_dev *edev) +{ + u32 curr_state = edev->state; + int rc; + + DP_NOTICE(edev, "Starting a recovery process\n"); + + /* No need to acquire first the qede_lock since is done by qede_sp_task + * before calling this function. 
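The recovery handler whose body follows is essentially a scripted re-probe: remember the current state, tear down in recovery mode (keeping the netdev registered), re-probe the qed layer, then reload only if the device was open. A skeleton of that sequence under those assumptions, with error handling compressed and the driver calls reduced to labelled stubs:

#include <stdio.h>

enum state { CLOSED, OPEN, RECOVERY };
static enum state state = OPEN;

static int probe_recovery(void) { puts("re-probe (recovery mode)"); return 0; }
static int load_recovery(void)  { puts("re-load queues and IRQs");  return 0; }

static void recovery_handler(void)
{
	enum state curr = state;

	state = RECOVERY;

	puts("prolog: let PCIe drain, drop FID_enable");
	if (curr == OPEN)
		puts("unload (recovery mode, netdev kept)");
	puts("remove (recovery mode, netdev kept)");

	if (probe_recovery())
		goto err;
	if (curr == OPEN && load_recovery())
		goto err;

	state = curr;              /* restore the pre-recovery state */
	puts("recovery done");
	return;
err:
	puts("recovery failed: detach device, power cycle needed");
}

int main(void) { recovery_handler(); return 0; }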
+ */ + edev->state = QEDE_STATE_RECOVERY; + + edev->ops->common->recovery_prolog(edev->cdev); + + if (curr_state == QEDE_STATE_OPEN) + qede_unload(edev, QEDE_UNLOAD_RECOVERY, true); + + __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY); + + rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level, + IS_VF(edev), QEDE_PROBE_RECOVERY); + if (rc) { + edev->cdev = NULL; + goto err; + } + + if (curr_state == QEDE_STATE_OPEN) { + rc = qede_load(edev, QEDE_LOAD_RECOVERY, true); + if (rc) + goto err; + + qede_config_rx_mode(edev->ndev); + udp_tunnel_get_rx_info(edev->ndev); + } + + edev->state = curr_state; + + DP_NOTICE(edev, "Recovery handling is done\n"); + + return; + +err: + qede_recovery_failed(edev); +} + static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq) { struct netdev_queue *netdev_txq; diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 1900bf7e67d1..9668e5e47d5f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -50,6 +50,8 @@ static void _qede_rdma_dev_add(struct qede_dev *edev) if (!qedr_drv) return; + /* Leftovers from previous error recovery */ + edev->rdma_info.exp_recovery = false; edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev, edev->ndev); } @@ -87,21 +89,26 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev) destroy_workqueue(edev->rdma_info.rdma_wq); } -int qede_rdma_dev_add(struct qede_dev *edev) +int qede_rdma_dev_add(struct qede_dev *edev, enum qede_rdma_probe_mode mode) { - int rc = 0; + int rc; - if (qede_rdma_supported(edev)) { - rc = qede_rdma_create_wq(edev); - if (rc) - return rc; + if (!qede_rdma_supported(edev)) + return 0; - INIT_LIST_HEAD(&edev->rdma_info.entry); - mutex_lock(&qedr_dev_list_lock); - list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); - _qede_rdma_dev_add(edev); - mutex_unlock(&qedr_dev_list_lock); - } + /* Cannot start qedr while recovering since it wasn't fully stopped */ + if (mode == QEDE_RDMA_PROBE_RECOVERY) + return 0; + + rc = qede_rdma_create_wq(edev); + if (rc) + return rc; + + INIT_LIST_HEAD(&edev->rdma_info.entry); + mutex_lock(&qedr_dev_list_lock); + list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); + _qede_rdma_dev_add(edev); + mutex_unlock(&qedr_dev_list_lock); return rc; } @@ -110,19 +117,31 @@ static void _qede_rdma_dev_remove(struct qede_dev *edev) { if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev) qedr_drv->remove(edev->rdma_info.qedr_dev); - edev->rdma_info.qedr_dev = NULL; } -void qede_rdma_dev_remove(struct qede_dev *edev) +void qede_rdma_dev_remove(struct qede_dev *edev, + enum qede_rdma_remove_mode mode) { if (!qede_rdma_supported(edev)) return; - qede_rdma_destroy_wq(edev); - mutex_lock(&qedr_dev_list_lock); - _qede_rdma_dev_remove(edev); - list_del(&edev->rdma_info.entry); - mutex_unlock(&qedr_dev_list_lock); + /* Cannot remove qedr while recovering since it wasn't fully stopped */ + if (mode == QEDE_RDMA_REMOVE_NORMAL) { + qede_rdma_destroy_wq(edev); + mutex_lock(&qedr_dev_list_lock); + if (!edev->rdma_info.exp_recovery) + _qede_rdma_dev_remove(edev); + edev->rdma_info.qedr_dev = NULL; + list_del(&edev->rdma_info.entry); + mutex_unlock(&qedr_dev_list_lock); + } else { + if (!edev->rdma_info.exp_recovery) { + mutex_lock(&qedr_dev_list_lock); + _qede_rdma_dev_remove(edev); + mutex_unlock(&qedr_dev_list_lock); + } + edev->rdma_info.exp_recovery = true; + } } static void _qede_rdma_dev_open(struct qede_dev *edev) @@ -204,7 +223,8 @@ 
void qede_rdma_unregister_driver(struct qedr_driver *drv) mutex_lock(&qedr_dev_list_lock); list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) { - if (edev->rdma_info.qedr_dev) + /* If device has experienced recovery it was already removed */ + if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery) _qede_rdma_dev_remove(edev); } qedr_drv = NULL; @@ -284,6 +304,10 @@ static void qede_rdma_add_event(struct qede_dev *edev, { struct qede_rdma_event_work *event_node; + /* If a recovery was experienced avoid adding the event */ + if (edev->rdma_info.exp_recovery) + return; + if (!edev->rdma_info.qedr_dev) return; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ffc1ada4e6da..d28c8f9ca55b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -343,7 +343,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) int i; priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) + - ETH_HLEN + VLAN_HLEN; + ETH_HLEN + VLAN_HLEN + sizeof(__sum16); /* Allocate RX and TX skb rings */ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], @@ -524,13 +524,15 @@ static void ravb_rx_csum(struct sk_buff *skb) { u8 *hw_csum; - /* The hardware checksum is 2 bytes appended to packet data */ - if (unlikely(skb->len < 2)) + /* The hardware checksum is contained in sizeof(__sum16) (2) bytes + * appended to packet data + */ + if (unlikely(skb->len < sizeof(__sum16))) return; - hw_csum = skb_tail_pointer(skb) - 2; + hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); skb->ip_summed = CHECKSUM_COMPLETE; - skb_trim(skb, skb->len - 2); + skb_trim(skb, skb->len - sizeof(__sum16)); } /* Packet receive function for Ethernet AVB */ diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index b6a50058bb8d..2f2bda68d861 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -6046,22 +6046,25 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" } }; +#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, struct efx_mcdi_mtd_partition *part, - unsigned int type) + unsigned int type, + unsigned long *found) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); const struct efx_ef10_nvram_type_info *info; size_t size, erase_size, outlen; + int type_idx = 0; bool protected; int rc; - for (info = efx_ef10_nvram_types; ; info++) { - if (info == - efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) + for (type_idx = 0; ; type_idx++) { + if (type_idx == EF10_NVRAM_PARTITION_COUNT) return -ENODEV; + info = efx_ef10_nvram_types + type_idx; if ((type & ~info->type_mask) == info->type) break; } @@ -6074,6 +6077,13 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, if (protected) return -ENODEV; /* hide it */ + /* If we've already exposed a partition of this type, hide this + * duplicate. All operations on MTDs are keyed by the type anyway, + * so we can't act on the duplicate. 
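The sfc hunk above introduces a found bitmap so that only the first partition of each NVRAM type is exposed as an MTD; __test_and_set_bit() sets the bit and reports whether it was already set, so one call both checks for and claims the type. A plain-C model of that first-wins filter, with test_and_set_bit() reimplemented as an ordinary function:

/* Userspace model of the duplicate-partition filtering: the first
 * occurrence of a type index claims its bit and is exposed, later
 * ones see the bit already set and are skipped (as with -EEXIST).
 */
#include <stdio.h>

static int test_and_set_bit(int nr, unsigned long *map)
{
	unsigned long mask = 1UL << nr;
	int old = !!(*map & mask);

	*map |= mask;
	return old;
}

int main(void)
{
	unsigned long found = 0;
	int types[] = { 3, 5, 3, 7, 5 };	/* types 3 and 5 duplicated */
	int i, n = sizeof(types) / sizeof(types[0]);

	for (i = 0; i < n; i++) {
		if (test_and_set_bit(types[i], &found)) {
			printf("type %d: duplicate, hidden\n", types[i]);
			continue;
		}
		printf("type %d: exposed as MTD\n", types[i]);
	}
	return 0;
}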
+ */ + if (__test_and_set_bit(type_idx, found)) + return -EEXIST; + part->nvram_type = type; MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); @@ -6105,6 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, static int efx_ef10_mtd_probe(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); + DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT); struct efx_mcdi_mtd_partition *parts; size_t outlen, n_parts_total, i, n_parts; unsigned int type; @@ -6133,11 +6144,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx) for (i = 0; i < n_parts_total; i++) { type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, i); - rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); - if (rc == 0) - n_parts++; - else if (rc != -ENODEV) + rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type, + found); + if (rc == -EEXIST || rc == -ENODEV) + continue; + if (rc) goto fail; + n_parts++; } rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 9020b084b953..7ec4eb74fe21 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1,22 +1,9 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0+ /* cassini.c: Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - * * This driver uses the sungem driver (c) David Miller * (davem@redhat.com) as its basis. * diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h index 13f3860496a8..ae5f05f03f88 100644 --- a/drivers/net/ethernet/sun/cassini.h +++ b/drivers/net/ethernet/sun/cassini.h @@ -1,23 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0+ */ /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - * * vendor id: 0x108E (Sun Microsystems, Inc.) 
* device id: 0xabba (Cassini) * revision ids: 0x01 = Cassini diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index ef6f766f6389..e859ae2e42d5 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -144,6 +144,8 @@ struct hv_netvsc_packet { u32 total_data_buflen; }; +#define NETVSC_HASH_KEYLEN 40 + struct netvsc_device_info { unsigned char mac_adr[ETH_ALEN]; u32 num_chn; @@ -151,6 +153,8 @@ struct netvsc_device_info { u32 recv_sections; u32 send_section_size; u32 recv_section_size; + + u8 rss_key[NETVSC_HASH_KEYLEN]; }; enum rndis_device_state { @@ -160,8 +164,6 @@ enum rndis_device_state { RNDIS_DEV_DATAINITIALIZED, }; -#define NETVSC_HASH_KEYLEN 40 - struct rndis_device { struct net_device *ndev; @@ -209,7 +211,9 @@ int netvsc_recv_callback(struct net_device *net, void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); -int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); +int rndis_set_subchannel(struct net_device *ndev, + struct netvsc_device *nvdev, + struct netvsc_device_info *dev_info); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, @@ -1177,7 +1181,7 @@ enum ndis_per_pkt_info_type { enum rndis_per_pkt_info_interal_type { RNDIS_PKTINFO_ID = 1, - /* Add more memebers here */ + /* Add more members here */ RNDIS_PKTINFO_MAX }; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 922054c1d544..813d195bbd57 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w) rdev = nvdev->extension; if (rdev) { - ret = rndis_set_subchannel(rdev->ndev, nvdev); + ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL); if (ret == 0) { netif_device_attach(rdev->ndev); } else { @@ -1331,7 +1331,7 @@ void netvsc_channel_cb(void *context) prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); if (napi_schedule_prep(&nvchan->napi)) { - /* disable interupts from host */ + /* disable interrupts from host */ hv_begin_read(rbi); __napi_schedule_irqoff(&nvchan->napi); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 91ed15ea5883..256adbd044f5 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -370,7 +370,7 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, { int j = 0; - /* Deal with compund pages by ignoring unused part + /* Deal with compound pages by ignoring unused part * of the page. */ page += (offset >> PAGE_SHIFT); @@ -858,6 +858,39 @@ static void netvsc_get_channels(struct net_device *net, } } +/* Alloc struct netvsc_device_info, and initialize it from either existing + * struct netvsc_device, or from default values. 
+ */ +static struct netvsc_device_info *netvsc_devinfo_get + (struct netvsc_device *nvdev) +{ + struct netvsc_device_info *dev_info; + + dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC); + + if (!dev_info) + return NULL; + + if (nvdev) { + dev_info->num_chn = nvdev->num_chn; + dev_info->send_sections = nvdev->send_section_cnt; + dev_info->send_section_size = nvdev->send_section_size; + dev_info->recv_sections = nvdev->recv_section_cnt; + dev_info->recv_section_size = nvdev->recv_section_size; + + memcpy(dev_info->rss_key, nvdev->extension->rss_key, + NETVSC_HASH_KEYLEN); + } else { + dev_info->num_chn = VRSS_CHANNEL_DEFAULT; + dev_info->send_sections = NETVSC_DEFAULT_TX; + dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE; + dev_info->recv_sections = NETVSC_DEFAULT_RX; + dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE; + } + + return dev_info; +} + static int netvsc_detach(struct net_device *ndev, struct netvsc_device *nvdev) { @@ -909,7 +942,7 @@ static int netvsc_attach(struct net_device *ndev, return PTR_ERR(nvdev); if (nvdev->num_chn > 1) { - ret = rndis_set_subchannel(ndev, nvdev); + ret = rndis_set_subchannel(ndev, nvdev, dev_info); /* if unavailable, just proceed with one queue */ if (ret) { @@ -943,7 +976,7 @@ static int netvsc_set_channels(struct net_device *net, struct net_device_context *net_device_ctx = netdev_priv(net); struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); unsigned int orig, count = channels->combined_count; - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info; int ret; /* We do not support separate count for rx, tx, or other */ @@ -962,24 +995,26 @@ static int netvsc_set_channels(struct net_device *net, orig = nvdev->num_chn; - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = count; - device_info.send_sections = nvdev->send_section_cnt; - device_info.send_section_size = nvdev->send_section_size; - device_info.recv_sections = nvdev->recv_section_cnt; - device_info.recv_section_size = nvdev->recv_section_size; + device_info = netvsc_devinfo_get(nvdev); + + if (!device_info) + return -ENOMEM; + + device_info->num_chn = count; ret = netvsc_detach(net, nvdev); if (ret) - return ret; + goto out; - ret = netvsc_attach(net, &device_info); + ret = netvsc_attach(net, device_info); if (ret) { - device_info.num_chn = orig; - if (netvsc_attach(net, &device_info)) + device_info->num_chn = orig; + if (netvsc_attach(net, device_info)) netdev_err(net, "restoring channel setting failed\n"); } +out: + kfree(device_info); return ret; } @@ -1048,48 +1083,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); int orig_mtu = ndev->mtu; - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info; int ret = 0; if (!nvdev || nvdev->destroy) return -ENODEV; + device_info = netvsc_devinfo_get(nvdev); + + if (!device_info) + return -ENOMEM; + /* Change MTU of underlying VF netdev first. 
*/ if (vf_netdev) { ret = dev_set_mtu(vf_netdev, mtu); if (ret) - return ret; + goto out; } - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = nvdev->num_chn; - device_info.send_sections = nvdev->send_section_cnt; - device_info.send_section_size = nvdev->send_section_size; - device_info.recv_sections = nvdev->recv_section_cnt; - device_info.recv_section_size = nvdev->recv_section_size; - ret = netvsc_detach(ndev, nvdev); if (ret) goto rollback_vf; ndev->mtu = mtu; - ret = netvsc_attach(ndev, &device_info); - if (ret) - goto rollback; - - return 0; + ret = netvsc_attach(ndev, device_info); + if (!ret) + goto out; -rollback: /* Attempt rollback to original MTU */ ndev->mtu = orig_mtu; - if (netvsc_attach(ndev, &device_info)) + if (netvsc_attach(ndev, device_info)) netdev_err(ndev, "restoring mtu failed\n"); rollback_vf: if (vf_netdev) dev_set_mtu(vf_netdev, orig_mtu); +out: + kfree(device_info); return ret; } @@ -1674,7 +1706,7 @@ static int netvsc_set_ringparam(struct net_device *ndev, { struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info; struct ethtool_ringparam orig; u32 new_tx, new_rx; int ret = 0; @@ -1694,26 +1726,29 @@ static int netvsc_set_ringparam(struct net_device *ndev, new_rx == orig.rx_pending) return 0; /* no change */ - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = nvdev->num_chn; - device_info.send_sections = new_tx; - device_info.send_section_size = nvdev->send_section_size; - device_info.recv_sections = new_rx; - device_info.recv_section_size = nvdev->recv_section_size; + device_info = netvsc_devinfo_get(nvdev); + + if (!device_info) + return -ENOMEM; + + device_info->send_sections = new_tx; + device_info->recv_sections = new_rx; ret = netvsc_detach(ndev, nvdev); if (ret) - return ret; + goto out; - ret = netvsc_attach(ndev, &device_info); + ret = netvsc_attach(ndev, device_info); if (ret) { - device_info.send_sections = orig.tx_pending; - device_info.recv_sections = orig.rx_pending; + device_info->send_sections = orig.tx_pending; + device_info->recv_sections = orig.rx_pending; - if (netvsc_attach(ndev, &device_info)) + if (netvsc_attach(ndev, device_info)) netdev_err(ndev, "restoring ringparam failed"); } +out: + kfree(device_info); return ret; } @@ -2088,7 +2123,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) return NOTIFY_DONE; - /* if syntihetic interface is a different namespace, + /* if synthetic interface is a different namespace, * then move the VF to that namespace; join will be * done again in that context. 
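The netvsc rework above funnels every reconfiguration path (channels, MTU, ring size) through the same pattern: snapshot the live settings with netvsc_devinfo_get(), detach, attach with the modified copy, and re-attach the snapshot if that fails. A self-contained sketch of the pattern, with all structures and helpers invented for illustration:

/* Snapshot/reconfigure/rollback sketch; not the driver's real API. */
#include <stdio.h>
#include <stdlib.h>

struct dev_info { int num_chn; int mtu; };
struct dev { struct dev_info cfg; int attached; };

static struct dev_info *devinfo_get(const struct dev *d)
{
	struct dev_info *info = calloc(1, sizeof(*info));

	if (!info)
		return NULL;
	*info = d->cfg;			/* snapshot current settings */
	return info;
}

static int dev_detach(struct dev *d) { d->attached = 0; return 0; }

static int dev_attach(struct dev *d, const struct dev_info *info)
{
	if (info->num_chn > 8)		/* pretend >8 channels is unsupported */
		return -1;
	d->cfg = *info;
	d->attached = 1;
	return 0;
}

static int set_channels(struct dev *d, int count)
{
	struct dev_info *info = devinfo_get(d);
	int orig, ret;

	if (!info)
		return -1;
	orig = info->num_chn;
	info->num_chn = count;

	ret = dev_detach(d);
	if (ret)
		goto out;
	ret = dev_attach(d, info);
	if (ret) {
		/* restore the snapshotted configuration on failure */
		info->num_chn = orig;
		if (dev_attach(d, info))
			fprintf(stderr, "restoring channels failed\n");
	}
out:
	free(info);
	return ret;
}

int main(void)
{
	struct dev d = { { 4, 1500 }, 1 };

	set_channels(&d, 16);		/* fails, rolls back to 4 channels */
	printf("channels now %d\n", d.cfg.num_chn);
	return 0;
}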
*/ @@ -2167,7 +2202,7 @@ static int netvsc_probe(struct hv_device *dev, { struct net_device *net = NULL; struct net_device_context *net_device_ctx; - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info = NULL; struct netvsc_device *nvdev; int ret = -ENOMEM; @@ -2214,21 +2249,21 @@ static int netvsc_probe(struct hv_device *dev, netif_set_real_num_rx_queues(net, 1); /* Notify the netvsc driver of the new device */ - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = VRSS_CHANNEL_DEFAULT; - device_info.send_sections = NETVSC_DEFAULT_TX; - device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; - device_info.recv_sections = NETVSC_DEFAULT_RX; - device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; - - nvdev = rndis_filter_device_add(dev, &device_info); + device_info = netvsc_devinfo_get(NULL); + + if (!device_info) { + ret = -ENOMEM; + goto devinfo_failed; + } + + nvdev = rndis_filter_device_add(dev, device_info); if (IS_ERR(nvdev)) { ret = PTR_ERR(nvdev); netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); goto rndis_failed; } - memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN); /* We must get rtnl lock before scheduling nvdev->subchan_work, * otherwise netvsc_subchan_work() can get rtnl lock first and wait @@ -2236,7 +2271,7 @@ static int netvsc_probe(struct hv_device *dev, * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() * -> ... -> device_add() -> ... -> __device_attach() can't get * the device lock, so all the subchannels can't be processed -- - * finally netvsc_subchan_work() hangs for ever. + * finally netvsc_subchan_work() hangs forever. */ rtnl_lock(); @@ -2266,12 +2301,16 @@ static int netvsc_probe(struct hv_device *dev, list_add(&net_device_ctx->list, &netvsc_dev_list); rtnl_unlock(); + + kfree(device_info); return 0; register_failed: rtnl_unlock(); rndis_filter_device_remove(dev, nvdev); rndis_failed: + kfree(device_info); +devinfo_failed: free_percpu(net_device_ctx->vf_stats); no_stats: hv_set_drvdata(dev, NULL); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 8b537a049c1e..73b60592de06 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -774,8 +774,8 @@ cleanup: return ret; } -int rndis_filter_set_rss_param(struct rndis_device *rdev, - const u8 *rss_key) +static int rndis_set_rss_param_msg(struct rndis_device *rdev, + const u8 *rss_key, u16 flag) { struct net_device *ndev = rdev->ndev; struct rndis_request *request; @@ -804,7 +804,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; rssp->hdr.size = sizeof(struct ndis_recv_scale_param); - rssp->flag = 0; + rssp->flag = flag; rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_TCP_IPV6; @@ -829,9 +829,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status == RNDIS_STATUS_SUCCESS) - memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); - else { + if (set_complete->status == RNDIS_STATUS_SUCCESS) { + if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) && + !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED)) + memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); + + } else { netdev_err(ndev, "Fail to set RSS 
parameters:0x%x\n", set_complete->status); ret = -EINVAL; @@ -842,6 +845,16 @@ cleanup: return ret; } +int rndis_filter_set_rss_param(struct rndis_device *rdev, + const u8 *rss_key) +{ + /* Disable RSS before change */ + rndis_set_rss_param_msg(rdev, rss_key, + NDIS_RSS_PARAM_FLAG_DISABLE_RSS); + + return rndis_set_rss_param_msg(rdev, rss_key, 0); +} + static int rndis_filter_query_device_link_status(struct rndis_device *dev, struct netvsc_device *net_device) { @@ -1121,7 +1134,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) * This breaks overlap of processing the host message for the * new primary channel with the initialization of sub-channels. */ -int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) +int rndis_set_subchannel(struct net_device *ndev, + struct netvsc_device *nvdev, + struct netvsc_device_info *dev_info) { struct nvsp_message *init_packet = &nvdev->channel_init_pkt; struct net_device_context *ndev_ctx = netdev_priv(ndev); @@ -1161,8 +1176,11 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) wait_event(nvdev->subchan_open, atomic_read(&nvdev->open_chn) == nvdev->num_chn); - /* ignore failues from setting rss parameters, still have channels */ - rndis_filter_set_rss_param(rdev, netvsc_hash_key); + /* ignore failures from setting rss parameters, still have channels */ + if (dev_info) + rndis_filter_set_rss_param(rdev, dev_info->rss_key); + else + rndis_filter_set_rss_param(rdev, netvsc_hash_key); netif_set_real_num_tx_queues(ndev, nvdev->num_chn); netif_set_real_num_rx_queues(ndev, nvdev->num_chn); diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c index 8ebe7f5484ae..f14ba5366b91 100644 --- a/drivers/net/phy/asix.c +++ b/drivers/net/phy/asix.c @@ -1,13 +1,7 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0+ /* Driver for Asix PHYs * * Author: Michael Schmitz <schmitzmic@gmail.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #include <linux/kernel.h> #include <linux/errno.h> diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c index b03fedd6c1d8..287f3ccf1da1 100644 --- a/drivers/net/phy/mdio-hisi-femac.c +++ b/drivers/net/phy/mdio-hisi-femac.c @@ -1,20 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Hisilicon Fast Ethernet MDIO Bus Driver * * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
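In the rndis_filter changes above, an RSS reconfiguration is split into two messages, one carrying NDIS_RSS_PARAM_FLAG_DISABLE_RSS and one applying the new parameters, and the key is cached only on a plain, key-changing set. A userspace model of that two-step update; the flag names mimic the NDIS ones but the message send is faked:

/* Two-step RSS update sketch: disable, then set and cache the key. */
#include <stdio.h>
#include <string.h>

#define KEYLEN			8
#define FLAG_DISABLE_RSS	0x1
#define FLAG_KEY_UNCHANGED	0x2

struct rdev { unsigned char rss_key[KEYLEN]; int rss_on; };

static int set_rss_param_msg(struct rdev *r, const unsigned char *key,
			     unsigned flag)
{
	r->rss_on = !(flag & FLAG_DISABLE_RSS);
	if (!(flag & (FLAG_DISABLE_RSS | FLAG_KEY_UNCHANGED)))
		memcpy(r->rss_key, key, KEYLEN);	/* cache new key */
	return 0;
}

static int set_rss_param(struct rdev *r, const unsigned char *key)
{
	/* Disable RSS before changing parameters, as the driver does. */
	set_rss_param_msg(r, key, FLAG_DISABLE_RSS);
	return set_rss_param_msg(r, key, 0);
}

int main(void)
{
	struct rdev r = { "oldkey1", 1 };
	unsigned char key[KEYLEN] = "newkey1";

	set_rss_param(&r, key);
	printf("rss_on=%d key=%s\n", r.rss_on, (char *)r.rss_key);
	return 0;
}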
*/ #include <linux/clk.h> @@ -163,4 +151,4 @@ module_platform_driver(hisi_femac_mdio_driver); MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver"); MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>"); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index bf3ce48a1e5d..46c86725a693 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -2255,6 +2255,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) { int retval; + if (WARN_ON(!new_driver->features)) { + pr_err("%s: Driver features are missing\n", new_driver->name); + return -EINVAL; + } + new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; new_driver->mdiodrv.driver.name = new_driver->name; new_driver->mdiodrv.driver.bus = &mdio_bus_type; diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c index f1da70b9b55f..95abf7072f32 100644 --- a/drivers/net/phy/rockchip.c +++ b/drivers/net/phy/rockchip.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /** * drivers/net/phy/rockchip.c * @@ -6,12 +7,6 @@ * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd * * David Wu <david.wu@rock-chips.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * */ #include <linux/ethtool.h> @@ -229,4 +224,4 @@ MODULE_DEVICE_TABLE(mdio, rockchip_phy_tbl); MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>"); MODULE_DESCRIPTION("Rockchip Ethernet PHY driver"); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index b654f05b2ccd..3d93993e74da 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); chipcode &= AX_CHIPCODE_MASK; - (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : - ax88772a_hw_reset(dev, 0); + ret = (chipcode == AX_AX88772_CHIPCODE) ? 
ax88772_hw_reset(dev, 0) : + ax88772a_hw_reset(dev, 0); + + if (ret < 0) { + netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret); + return ret; + } /* Read PHYID register *AFTER* the PHY was reset properly */ phyid = asix_get_phyid(dev); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 023725086046..8fadd8eaf601 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1330,7 +1330,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, return stats.packets; } -static void free_old_xmit_skbs(struct send_queue *sq) +static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) { struct sk_buff *skb; unsigned int len; @@ -1343,7 +1343,7 @@ static void free_old_xmit_skbs(struct send_queue *sq) bytes += skb->len; packets++; - dev_consume_skb_any(skb); + napi_consume_skb(skb, in_napi); } /* Avoid overhead when no packets have been processed @@ -1369,7 +1369,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) return; if (__netif_tx_trylock(txq)) { - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, true); __netif_tx_unlock(txq); } @@ -1445,7 +1445,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); __netif_tx_lock(txq, raw_smp_processor_id()); - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, true); __netif_tx_unlock(txq); virtqueue_napi_complete(napi, sq->vq, 0); @@ -1514,7 +1514,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) bool use_napi = sq->napi.weight; /* Free up any pending old buffers before queueing new ones. */ - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, false); if (use_napi && kick) virtqueue_enable_cb_delayed(sq->vq); @@ -1557,7 +1557,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) if (!use_napi && unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, false); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); virtqueue_disable_cb(sq->vq); diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 2b2cf4e554d3..e5ffd5733540 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -54,12 +54,12 @@ struct nvdimm { }; static inline enum nvdimm_security_state nvdimm_security_state( - struct nvdimm *nvdimm, bool master) + struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype) { if (!nvdimm->sec.ops) return -ENXIO; - return nvdimm->sec.ops->state(nvdimm, master); + return nvdimm->sec.ops->state(nvdimm, ptype); } int nvdimm_security_freeze(struct nvdimm *nvdimm); #if IS_ENABLED(CONFIG_NVDIMM_KEYS) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index deb1a66bf117..9bc585415d9b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2041,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) return ret; } +/* irq_queues covers admin queue */ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) { unsigned int this_w_queues = write_queues; + WARN_ON(!irq_queues); + /* - * Setup read/write queue split + * Setup read/write queue split, assign admin queue one independent + * irq vector if irq_queues is > 1. 
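The virtio_net hunks above thread an in_napi flag into free_old_xmit_skbs() so completions processed inside NAPI polling can take napi_consume_skb()'s bulk-free path, while start_xmit() keeps the context-safe variant. A sketch of that context-flag pattern, with the two free routines stubbed out (they stand in for napi_consume_skb() and dev_consume_skb_any()):

/* Same completion routine, two freeing strategies chosen by context. */
#include <stdio.h>

static void consume_napi(int id) { printf("bulk-free skb %d (napi)\n", id); }
static void consume_any(int id)  { printf("free skb %d (any context)\n", id); }

static void free_old_xmit(const int *ids, int n, int in_napi)
{
	int i;

	for (i = 0; i < n; i++) {
		if (in_napi)
			consume_napi(ids[i]);
		else
			consume_any(ids[i]);
	}
}

int main(void)
{
	int ids[] = { 1, 2, 3 };

	free_old_xmit(ids, 3, 1);	/* called from poll: NAPI context */
	free_old_xmit(ids, 3, 0);	/* called from start_xmit: not NAPI */
	return 0;
}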
*/ - if (irq_queues == 1) { + if (irq_queues <= 2) { dev->io_queues[HCTX_TYPE_DEFAULT] = 1; dev->io_queues[HCTX_TYPE_READ] = 0; return; @@ -2056,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) /* * If 'write_queues' is set, ensure it leaves room for at least - * one read queue + * one read queue and one admin queue */ if (this_w_queues >= irq_queues) - this_w_queues = irq_queues - 1; + this_w_queues = irq_queues - 2; /* * If 'write_queues' is set to zero, reads and writes will share * a queue set. */ if (!this_w_queues) { - dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues; + dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1; dev->io_queues[HCTX_TYPE_READ] = 0; } else { dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; - dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues; + dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1; } } @@ -2095,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) this_p_queues = nr_io_queues - 1; irq_queues = 1; } else { - irq_queues = nr_io_queues - this_p_queues; + irq_queues = nr_io_queues - this_p_queues + 1; } dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; @@ -2115,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) * If we got a failure and we're down to asking for just * 1 + 1 queues, just ask for a single vector. We'll share * that between the single IO queue and the admin queue. + * Otherwise, we assign one independent vector to admin queue. */ - if (result >= 0 && irq_queues > 1) + if (irq_queues > 1) irq_queues = irq_sets[0] + irq_sets[1] + 1; result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 44b37b202e39..ad0df786fe93 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1089,7 +1089,7 @@ out: static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) { - int result; + int result = 0; if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) return 0; diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index a09c1c3cf831..49b16f76d78e 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -207,11 +207,8 @@ static void __of_attach_node(struct device_node *np) if (!of_node_check_flag(np, OF_OVERLAY)) { np->name = __of_get_property(np, "name", NULL); - np->type = __of_get_property(np, "device_type", NULL); if (!np->name) np->name = "<NULL>"; - if (!np->type) - np->type = "<NULL>"; phandle = __of_get_property(np, "phandle", &sz); if (!phandle) diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 7099c652c6a5..9cc1461aac7d 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -314,12 +314,8 @@ static bool populate_node(const void *blob, populate_properties(blob, offset, mem, np, pathp, dryrun); if (!dryrun) { np->name = of_get_property(np, "name", NULL); - np->type = of_get_property(np, "device_type", NULL); - if (!np->name) np->name = "<NULL>"; - if (!np->type) - np->type = "<NULL>"; } *pnp = np; diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 2b5ac43a5690..c423e94baf0f 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -423,12 +423,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs, tchild->parent = target->np; tchild->name = __of_get_property(node, "name", NULL); - tchild->type = __of_get_property(node, "device_type", NULL); if (!tchild->name) tchild->name = "<NULL>"; - if (!tchild->type) - tchild->type = "<NULL>"; /* ignore obsolete "linux,phandle" */ 
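The nvme_calc_io_queues() changes above re-base the read/write split on irq_queues - 1 once one vector is reserved for the admin queue. That arithmetic can be modeled directly; write_queues mirrors the module parameter of the same name, and the helper below is illustrative rather than the driver's function:

/* Worked model of the admin-aware read/write queue split. */
#include <stdio.h>

static void calc_io_queues(unsigned irq_queues, unsigned write_queues,
			   unsigned *def, unsigned *read)
{
	unsigned w = write_queues;

	if (irq_queues <= 2) {		/* admin + at most one I/O vector */
		*def = 1;
		*read = 0;
		return;
	}
	if (w >= irq_queues)		/* leave room for read + admin */
		w = irq_queues - 2;
	if (!w) {			/* reads and writes share a set */
		*def = irq_queues - 1;
		*read = 0;
	} else {
		*def = w;
		*read = irq_queues - w - 1;
	}
}

int main(void)
{
	unsigned def, read;

	calc_io_queues(8, 0, &def, &read);
	printf("8 vectors, shared: default=%u read=%u\n", def, read);
	calc_io_queues(8, 3, &def, &read);
	printf("8 vectors, write_queues=3: default=%u read=%u\n", def, read);
	return 0;
}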
phandle = __of_get_property(node, "phandle", &size); diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c index d3185063d369..7eda43c66c91 100644 --- a/drivers/of/pdt.c +++ b/drivers/of/pdt.c @@ -155,7 +155,6 @@ static struct device_node * __init of_pdt_create_node(phandle node, dp->parent = parent; dp->name = of_pdt_get_one_property(node, "name"); - dp->type = of_pdt_get_one_property(node, "device_type"); dp->phandle = node; dp->properties = of_pdt_build_prop_list(node); diff --git a/drivers/of/property.c b/drivers/of/property.c index 08430031bd28..8631efa1daa1 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, if (!of_device_is_available(remote)) { pr_debug("not available for remote node\n"); + of_node_put(remote); return NULL; } diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 4310c7a4212e..2ab92409210a 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -21,13 +21,14 @@ menuconfig PCI support for PCI-X and the foundations for PCI Express support. Say 'Y' here unless you know what you are doing. +if PCI + config PCI_DOMAINS bool depends on PCI config PCI_DOMAINS_GENERIC bool - depends on PCI select PCI_DOMAINS config PCI_SYSCALL @@ -37,7 +38,6 @@ source "drivers/pci/pcie/Kconfig" config PCI_MSI bool "Message Signaled Interrupts (MSI and MSI-X)" - depends on PCI select GENERIC_MSI_IRQ help This allows device drivers to enable MSI (Message Signaled @@ -59,7 +59,6 @@ config PCI_MSI_IRQ_DOMAIN config PCI_QUIRKS default y bool "Enable PCI quirk workarounds" if EXPERT - depends on PCI help This enables workarounds for various PCI chipset bugs/quirks. Disable this only if your target machine is unaffected by PCI @@ -67,7 +66,7 @@ config PCI_QUIRKS config PCI_DEBUG bool "PCI Debugging" - depends on PCI && DEBUG_KERNEL + depends on DEBUG_KERNEL help Say Y here if you want the PCI core to produce a bunch of debug messages to the system log. Select this if you are having a @@ -77,7 +76,6 @@ config PCI_DEBUG config PCI_REALLOC_ENABLE_AUTO bool "Enable PCI resource re-allocation detection" - depends on PCI depends on PCI_IOV help Say Y here if you want the PCI core to detect if PCI resource @@ -90,7 +88,6 @@ config PCI_REALLOC_ENABLE_AUTO config PCI_STUB tristate "PCI Stub driver" - depends on PCI help Say Y or M here if you want be able to reserve a PCI device when it is going to be assigned to a guest operating system. @@ -99,7 +96,6 @@ config PCI_STUB config PCI_PF_STUB tristate "PCI PF Stub driver" - depends on PCI depends on PCI_IOV help Say Y or M here if you want to enable support for devices that @@ -111,7 +107,7 @@ config PCI_PF_STUB config XEN_PCIDEV_FRONTEND tristate "Xen PCI Frontend" - depends on PCI && X86 && XEN + depends on X86 && XEN select PCI_XEN select XEN_XENBUS_FRONTEND default y @@ -133,7 +129,6 @@ config PCI_BRIDGE_EMUL config PCI_IOV bool "PCI IOV support" - depends on PCI select PCI_ATS help I/O Virtualization is a PCI feature supported by some devices @@ -144,7 +139,6 @@ config PCI_IOV config PCI_PRI bool "PCI PRI support" - depends on PCI select PCI_ATS help PRI is the PCI Page Request Interface. 
It allows PCI devices that are @@ -154,7 +148,6 @@ config PCI_PRI config PCI_PASID bool "PCI PASID support" - depends on PCI select PCI_ATS help Process Address Space Identifiers (PASIDs) can be used by PCI devices @@ -167,7 +160,7 @@ config PCI_PASID config PCI_P2PDMA bool "PCI peer-to-peer transfer support" - depends on PCI && ZONE_DEVICE + depends on ZONE_DEVICE select GENERIC_ALLOCATOR help Enables drivers to do PCI peer-to-peer transactions to and from @@ -184,12 +177,11 @@ config PCI_P2PDMA config PCI_LABEL def_bool y if (DMI || ACPI) - depends on PCI select NLS config PCI_HYPERV tristate "Hyper-V PCI Frontend" - depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 + depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 help The PCI device frontend driver allows the kernel to import arbitrary PCI devices from a PCI backend to support PCI driver domains. @@ -198,3 +190,5 @@ source "drivers/pci/hotplug/Kconfig" source "drivers/pci/controller/Kconfig" source "drivers/pci/endpoint/Kconfig" source "drivers/pci/switch/Kconfig" + +endif diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 7a1c8a09efa5..4c0b47867258 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1168,7 +1168,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, const struct irq_affinity *affd) { static const struct irq_affinity msi_default_affd; - int vecs = -ENOSPC; + int msix_vecs = -ENOSPC; + int msi_vecs = -ENOSPC; if (flags & PCI_IRQ_AFFINITY) { if (!affd) @@ -1179,16 +1180,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, } if (flags & PCI_IRQ_MSIX) { - vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, - affd); - if (vecs > 0) - return vecs; + msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, + max_vecs, affd); + if (msix_vecs > 0) + return msix_vecs; } if (flags & PCI_IRQ_MSI) { - vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); - if (vecs > 0) - return vecs; + msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, + affd); + if (msi_vecs > 0) + return msi_vecs; } /* use legacy irq if allowed */ @@ -1199,7 +1201,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, } } - return vecs; + if (msix_vecs == -ENOSPC) + return -ENOSPC; + return msi_vecs; } EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c9d8e3c837de..c25acace7d91 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str) } else if (!strncmp(str, "pcie_scan_all", 13)) { pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); } else if (!strncmp(str, "disable_acs_redir=", 18)) { - disable_acs_redir_param = str + 18; + disable_acs_redir_param = + kstrdup(str + 18, GFP_KERNEL); } else { printk(KERN_ERR "PCI: Unknown option `%s'\n", str); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index e3b62c2ee8d1..5e2109c54c7c 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1009,7 +1009,7 @@ config INTEL_MFLD_THERMAL config INTEL_IPS tristate "Intel Intelligent Power Sharing" - depends on ACPI + depends on ACPI && PCI ---help--- Intel Calpella platforms support dynamic power sharing between the CPU and GPU, maximizing performance in a given TDP.
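The msi.c hunk above keeps the MSI-X and MSI results in separate variables so that, when no legacy IRQ is available, the final error can prefer MSI-X's -ENOSPC over MSI's failure code. A stand-alone model of that fallback ladder, with deliberately failing enable helpers standing in for the real __pci_enable_*_range() calls:

/* Try MSI-X, then MSI, then legacy; report the most useful error. */
#include <stdio.h>

#define ENOSPC_ERR	-28
#define EINVAL_ERR	-22

static int enable_msix(int want) { (void)want; return ENOSPC_ERR; }
static int enable_msi(int want)  { (void)want; return EINVAL_ERR; }

static int alloc_irq_vectors(int want, int allow_legacy)
{
	int msix_vecs = ENOSPC_ERR;
	int msi_vecs = ENOSPC_ERR;

	msix_vecs = enable_msix(want);
	if (msix_vecs > 0)
		return msix_vecs;

	msi_vecs = enable_msi(want);
	if (msi_vecs > 0)
		return msi_vecs;

	if (allow_legacy)
		return 1;		/* fall back to the legacy IRQ */

	/* Prefer -ENOSPC if MSI-X ran out of vectors, else MSI's error. */
	if (msix_vecs == ENOSPC_ERR)
		return ENOSPC_ERR;
	return msi_vecs;
}

int main(void)
{
	printf("no legacy: %d\n", alloc_irq_vectors(4, 0));
	printf("legacy ok: %d\n", alloc_irq_vectors(4, 1));
	return 0;
}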
This driver, @@ -1135,7 +1135,7 @@ config SAMSUNG_Q10 config APPLE_GMUX tristate "Apple Gmux Driver" - depends on ACPI + depends on ACPI && PCI depends on PNP depends on BACKLIGHT_CLASS_DEVICE depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE @@ -1174,7 +1174,7 @@ config INTEL_SMARTCONNECT config INTEL_PMC_IPC tristate "Intel PMC IPC Driver" - depends on ACPI + depends on ACPI && PCI ---help--- This driver provides support for PMC control on some Intel platforms. The PMC is an ARC processor which defines IPC commands for communication diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 183fc42a510a..2d7cd344f3bf 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, const bool * ctx, struct irq_affinity *desc) { - int i, ret; + int i, ret, queue_idx = 0; for (i = 0; i < nvqs; ++i) { - vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], + if (!names[i]) { + vqs[i] = NULL; + continue; + } + + vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i], ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index fc9dbad476c0..ae1d56da671d 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, { struct virtio_ccw_device *vcdev = to_vc_device(vdev); unsigned long *indicatorp = NULL; - int ret, i; + int ret, i, queue_idx = 0; struct ccw1 *ccw; ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); @@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, return -ENOMEM; for (i = 0; i < nvqs; ++i) { - vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], - ctx ? ctx[i] : false, ccw); + if (!names[i]) { + vqs[i] = NULL; + continue; + } + + vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i], + names[i], ctx ? ctx[i] : false, + ccw); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); vqs[i] = NULL; diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 8a20411699d9..75e1273a44b3 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, } static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, - unsigned int tid, int pg_idx, bool reply) + unsigned int tid, int pg_idx) { struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, GFP_KERNEL); @@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, req = (struct cpl_set_tcb_field *)skb->head; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply = V_NO_REPLY(reply ? 
0 : 1); + req->reply = V_NO_REPLY(1); req->cpu_idx = 0; req->word = htons(31); req->mask = cpu_to_be64(0xF0000000); @@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, * @tid: connection id * @hcrc: header digest enabled * @dcrc: data digest enabled - * @reply: request reply from h/w * set up the iscsi digest settings for a connection identified by tid */ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, - int hcrc, int dcrc, int reply) + int hcrc, int dcrc) { struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, GFP_KERNEL); @@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, req = (struct cpl_set_tcb_field *)skb->head; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply = V_NO_REPLY(reply ? 0 : 1); + req->reply = V_NO_REPLY(1); req->cpu_idx = 0; req->word = htons(31); req->mask = cpu_to_be64(0x0F000000); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 49f8028ac524..d26f50af00ea 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) struct cxgbi_sock *csk; csk = lookup_tid(t, tid); - if (!csk) + if (!csk) { pr_err("can't find conn. for tid %u.\n", tid); + return; + } log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,%lx,%u, status 0x%x.\n", csk, csk->state, csk->flags, csk->tid, rpl->status); - if (rpl->status != CPL_ERR_NONE) + if (rpl->status != CPL_ERR_NONE) { pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", csk, tid, rpl->status); + csk->err = -EINVAL; + } + + complete(&csk->cmpl); __kfree_skb(skb); } @@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, } static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, - int pg_idx, bool reply) + int pg_idx) { struct sk_buff *skb; struct cpl_set_tcb_field *req; @@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, req = (struct cpl_set_tcb_field *)skb->head; INIT_TP_WR(req, csk->tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); req->word_cookie = htons(0); req->mask = cpu_to_be64(0x3 << 8); req->val = cpu_to_be64(pg_idx << 8); @@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); + reinit_completion(&csk->cmpl); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); - return 0; + wait_for_completion(&csk->cmpl); + + return csk->err; } static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, - int hcrc, int dcrc, int reply) + int hcrc, int dcrc) { struct sk_buff *skb; struct cpl_set_tcb_field *req; @@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, req = (struct cpl_set_tcb_field *)skb->head; INIT_TP_WR(req, tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); req->word_cookie = htons(0); req->mask = cpu_to_be64(0x3 << 4); req->val = 
cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | @@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); + reinit_completion(&csk->cmpl); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); - return 0; + wait_for_completion(&csk->cmpl); + + return csk->err; } static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 75f876409fb9..245742557c03 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) skb_queue_head_init(&csk->receive_queue); skb_queue_head_init(&csk->write_queue); timer_setup(&csk->retry_timer, NULL, 0); + init_completion(&csk->cmpl); rwlock_init(&csk->callback_lock); csk->cdev = cdev; csk->flags = 0; @@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, if (!err && conn->hdrdgst_en) err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, conn->hdrdgst_en, - conn->datadgst_en, 0); + conn->datadgst_en); break; case ISCSI_PARAM_DATADGST_EN: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err && conn->datadgst_en) err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, conn->hdrdgst_en, - conn->datadgst_en, 0); + conn->datadgst_en); break; case ISCSI_PARAM_MAX_R2T: return iscsi_tcp_set_max_r2t(conn, buf); @@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, ppm = csk->cdev->cdev2ppm(csk->cdev); err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, - ppm->tformat.pgsz_idx_dflt, 0); + ppm->tformat.pgsz_idx_dflt); if (err < 0) return err; diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 5d5d8b50d842..1917ff57651d 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -149,6 +149,7 @@ struct cxgbi_sock { struct sk_buff_head receive_queue; struct sk_buff_head write_queue; struct timer_list retry_timer; + struct completion cmpl; int err; rwlock_t callback_lock; void *user_data; @@ -490,9 +491,9 @@ struct cxgbi_device { struct cxgbi_ppm *, struct cxgbi_task_tag_info *); int (*csk_ddp_setup_digest)(struct cxgbi_sock *, - unsigned int, int, int, int); + unsigned int, int, int); int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, - unsigned int, int, bool); + unsigned int, int); void (*csk_release_offload_resources)(struct cxgbi_sock *); int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index e2420a810e99..c92b3822c408 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2507,6 +2507,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) sha->sas_port[i] = &hisi_hba->port[i].sas_port; } + if (hisi_hba->prot_mask) { + dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", + prot_mask); + scsi_host_set_prot(hisi_hba->shost, prot_mask); + } + rc = scsi_add_host(shost, dev); if (rc) goto err_out_ha; @@ -2519,12 +2525,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) goto err_out_register_ha; - if (hisi_hba->prot_mask) { - dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", - prot_mask); - scsi_host_set_prot(hisi_hba->shost, prot_mask); - } - scsi_scan_host(shost); return 0; diff --git a/drivers/scsi/isci/init.c 
b/drivers/scsi/isci/init.c index 68b90c4f79a3..1727d0c71b12 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -576,6 +576,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) shost->max_lun = ~0; shost->max_cmd_len = MAX_COMMAND_SIZE; + /* turn on DIF support */ + scsi_host_set_prot(shost, + SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + err = scsi_add_host(shost, &pdev->dev); if (err) goto err_shost; @@ -663,13 +670,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_host_alloc; } pci_info->hosts[i] = h; - - /* turn on DIF support */ - scsi_host_set_prot(to_shost(h), - SHOST_DIF_TYPE1_PROTECTION | - SHOST_DIF_TYPE2_PROTECTION | - SHOST_DIF_TYPE3_PROTECTION); - scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); } err = isci_setup_interrupts(pdev); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 12fd74761ae0..2242e9b3ca12 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -9407,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, cmnd = CMD_XMIT_SEQUENCE64_CR; if (phba->link_flag & LS_LOOPBACK_MODE) bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); + /* fall through */ case CMD_XMIT_SEQUENCE64_CR: /* word3 iocb=io_tag32 wqe=reserved */ wqe->xmit_sequence.rsvd3 = 0; @@ -13528,6 +13529,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2537 Receive Frame Truncated!!\n"); + /* fall through */ case FC_STATUS_RQ_SUCCESS: spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); @@ -13937,7 +13939,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6126 Receive Frame Truncated!!\n"); - /* Drop thru */ + /* fall through */ case FC_STATUS_RQ_SUCCESS: spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); @@ -14849,7 +14851,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) eq->entry_count); if (eq->entry_count < 256) return -EINVAL; - /* otherwise default to smallest count (drop through) */ + /* fall through - otherwise default to smallest count */ case 256: bf_set(lpfc_eq_context_count, &eq_create->u.request.context, LPFC_EQ_CNT_256); @@ -14980,7 +14982,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, LPFC_CQ_CNT_WORD7); break; } - /* Fall Thru */ + /* fall through */ default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0361 Unsupported CQ count: " @@ -14991,7 +14993,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, status = -EINVAL; goto out; } - /* otherwise default to smallest count (drop through) */ + /* fall through - otherwise default to smallest count */ case 256: bf_set(lpfc_cq_context_count, &cq_create->u.request.context, LPFC_CQ_CNT_256); @@ -15151,7 +15153,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, LPFC_CQ_CNT_WORD7); break; } - /* Fall Thru */ + /* fall through */ default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3118 Bad CQ count. 
(%d)\n", @@ -15160,7 +15162,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, status = -EINVAL; goto out; } - /* otherwise default to smallest (drop thru) */ + /* fall through - otherwise default to smallest */ case 256: bf_set(lpfc_mbx_cq_create_set_cqe_cnt, &cq_set->u.request, LPFC_CQ_CNT_256); @@ -15432,7 +15434,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, status = -EINVAL; goto out; } - /* otherwise default to smallest count (drop through) */ + /* fall through - otherwise default to smallest count */ case 16: bf_set(lpfc_mq_context_ring_size, &mq_create_ext->u.request.context, @@ -15851,7 +15853,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, status = -EINVAL; goto out; } - /* otherwise default to smallest count (drop through) */ + /* fall through - otherwise default to smallest count */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, @@ -15988,7 +15990,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, status = -EINVAL; goto out; } - /* otherwise default to smallest count (drop through) */ + /* fall through - otherwise default to smallest count */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7eaa400f6328..fcbff83c0097 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -6236,7 +6236,7 @@ megasas_set_dma_mask(struct megasas_instance *instance) instance->consistent_mask_64bit = true; dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", - ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"), + ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), (instance->consistent_mask_64bit ? 
"63" : "32")); return 0; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index a9a25f0eaf6f..647f48a28f85 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -175,7 +175,8 @@ megasas_clear_intr_fusion(struct megasas_instance *instance) /* * Check if it is our interrupt */ - status = readl(®s->outbound_intr_status); + status = megasas_readl(instance, + ®s->outbound_intr_status); if (status & 1) { writel(status, ®s->outbound_intr_status); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 4c5a3d23e010..084f2fcced0a 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -657,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) if (dev->dev_type == SAS_SATA_DEV) { pm8001_device->attached_phy = dev->rphy->identify.phy_identifier; - flag = 1; /* directly sata*/ + flag = 1; /* directly sata */ } } /*register this device to HBA*/ PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 4da660c1c431..6d6d6013e35b 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -953,6 +953,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) qedi_ep = ep->dd_data; if (qedi_ep->state == EP_STATE_IDLE || + qedi_ep->state == EP_STATE_OFLDCONN_NONE || qedi_ep->state == EP_STATE_OFLDCONN_FAILED) return -1; @@ -1035,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) switch (qedi_ep->state) { case EP_STATE_OFLDCONN_START: + case EP_STATE_OFLDCONN_NONE: goto ep_release_conn; case EP_STATE_OFLDCONN_FAILED: break; @@ -1225,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) if (!is_valid_ether_addr(&path_data->mac_addr[0])) { QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); + qedi_ep->state = EP_STATE_OFLDCONN_NONE; ret = -EIO; goto set_path_exit; } diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index 11260776212f..892d70d54553 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -59,6 +59,7 @@ enum { EP_STATE_OFLDCONN_FAILED = 0x2000, EP_STATE_CONNECT_FAILED = 0x4000, EP_STATE_DISCONN_TIMEDOUT = 0x8000, + EP_STATE_OFLDCONN_NONE = 0x10000, }; struct qedi_conn; diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index a414f51302b7..6856dfdfa473 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -4248,7 +4248,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->devnum = devnum; /* specifies microcode load address */ #ifdef QLA_64BIT_PTR - if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { + if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { printk(KERN_WARNING "scsi(%li): Unable to set a " "suitable DMA mask - aborting\n", ha->host_no); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 26b93c563f92..d1fc4958222a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -4394,6 +4394,8 @@ typedef struct scsi_qla_host { uint16_t n2n_id; struct list_head gpnid_list; struct fab_scan scan; + + unsigned int irq_offset; } scsi_qla_host_t; struct qla27xx_image_status { diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 30d3090842f8..8507c43b918c 100644 --- 
a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3446,6 +3446,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); } } + vha->irq_offset = desc.pre_vectors; ha->msix_entries = kcalloc(ha->msix_count, sizeof(struct qla_msix_entry), GFP_KERNEL); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ea69dafc9774..c6ef83d0d99b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -6939,7 +6939,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost) if (USER_CTRL_IRQ(vha->hw)) rc = blk_mq_map_queues(qmap); else - rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0); + rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); return rc; } diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index cfdfcda28072..a77bfb224248 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -7232,6 +7232,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); + if (rc) + goto free_sess; ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", __func__, fnode_sess->dev.kobj.name); diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index a2b4179bfdf7..7639df91b110 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev, if (err == 0) { pm_runtime_disable(dev); - pm_runtime_set_active(dev); + err = pm_runtime_set_active(dev); pm_runtime_enable(dev); + + /* + * Forcibly set runtime PM status of request queue to "active" + * to make sure we can again get requests from the queue + * (see also blk_pm_peek_request()). + * + * The resume hook will correct runtime PM status of the disk. + */ + if (!err && scsi_is_sdev_device(dev)) { + struct scsi_device *sdev = to_scsi_device(dev); + + if (sdev->request_queue->dev) + blk_set_runtime_active(sdev->request_queue); + } } return err; @@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev, else fn = NULL; - /* - * Forcibly set runtime PM status of request queue to "active" to - * make sure we can again get requests from the queue (see also - * blk_pm_peek_request()). - * - * The resume hook will correct runtime PM status of the disk. - */ - if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) - blk_set_runtime_active(to_scsi_device(dev)->request_queue); - if (fn) { async_schedule_domain(fn, dev, &scsi_sd_pm_domain); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a1a44f52e0e8..b2da8a00ec33 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr, sp = buffer_data[0] & 0x80 ? 1 : 0; buffer_data[0] &= ~0x80; + /* + * Ensure WP, DPOFUA, and RESERVED fields are cleared in + * received mode parameter buffer before doing MODE SELECT. 
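The sd.c hunk above zeroes the device-specific byte that MODE SENSE echoed back before the buffer is reused for MODE SELECT, where WP and DPOFUA are reserved and some targets reject non-zero values. A small sketch over the standard 4-byte mode parameter header; the helper itself is illustrative, not the driver's code:

/* Scrub the device-specific byte before issuing MODE SELECT. */
#include <stdio.h>

struct mode_param_header {
	unsigned char mode_data_length;
	unsigned char medium_type;
	unsigned char device_specific;	/* bit7 WP, bit4 DPOFUA on sense */
	unsigned char block_desc_length;
};

static void prepare_mode_select(struct mode_param_header *h)
{
	/* Reserved on MODE SELECT: must be zero. */
	h->device_specific = 0;
}

int main(void)
{
	struct mode_param_header h = { 0x17, 0x00, 0x80, 0x08 }; /* WP set */

	prepare_mode_select(&h);
	printf("device_specific now 0x%02x\n", h.device_specific);
	return 0;
}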
+ */ + data.device_specific = 0; + if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, SD_MAX_RETRIES, &data, &sshdr)) { if (scsi_sense_valid(&sshdr)) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 7bde6c809442..f564af8949e8 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -323,7 +323,7 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) { - return device->in_remove & !ctrl_info->in_shutdown; + return device->in_remove && !ctrl_info->in_shutdown; } static inline void pqi_schedule_rescan_worker_with_delay( diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index dd65fea07687..6d176815e6ce 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -195,7 +195,7 @@ enum ufs_desc_def_size { QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, QUERY_DESC_UNIT_DEF_SIZE = 0x23, QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, - QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, + QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48, QUERY_DESC_POWER_DEF_SIZE = 0x62, QUERY_DESC_HEALTH_DEF_SIZE = 0x25, }; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9ba7671b84f8..71334aaf1447 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -8001,6 +8001,8 @@ out: trace_ufshcd_system_resume(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), hba->curr_dev_pwr_mode, hba->uic_link_state); + if (!ret) + hba->is_sys_suspended = false; return ret; } EXPORT_SYMBOL(ufshcd_system_resume); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 984941e036c8..bd15a564fe24 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -714,7 +714,7 @@ static int __init iscsi_target_init_module(void) sizeof(struct iscsi_queue_req), __alignof__(struct iscsi_queue_req), 0, NULL); if (!lio_qr_cache) { - pr_err("nable to kmem_cache_create() for" + pr_err("Unable to kmem_cache_create() for" " lio_qr_cache\n"); goto bitmap_out; } diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 1e6d24943565..c34c88ef3319 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -148,7 +148,7 @@ struct tcmu_dev { size_t ring_size; struct mutex cmdr_lock; - struct list_head cmdr_queue; + struct list_head qfull_queue; uint32_t dbi_max; uint32_t dbi_thresh; @@ -159,6 +159,7 @@ struct tcmu_dev { struct timer_list cmd_timer; unsigned int cmd_time_out; + struct list_head inflight_queue; struct timer_list qfull_timer; int qfull_time_out; @@ -179,7 +180,7 @@ struct tcmu_dev { struct tcmu_cmd { struct se_cmd *se_cmd; struct tcmu_dev *tcmu_dev; - struct list_head cmdr_queue_entry; + struct list_head queue_entry; uint16_t cmd_id; @@ -192,6 +193,7 @@ struct tcmu_cmd { unsigned long deadline; #define TCMU_CMD_BIT_EXPIRED 0 +#define TCMU_CMD_BIT_INFLIGHT 1 unsigned long flags; }; /* @@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) if (!tcmu_cmd) return NULL; - INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); + INIT_LIST_HEAD(&tcmu_cmd->queue_entry); tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; @@ -915,11 +917,13 @@ setup_timer: return 0; tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); - mod_timer(timer, tcmu_cmd->deadline); + if (!timer_pending(timer)) + mod_timer(timer, tcmu_cmd->deadline); + 
return 0; } -static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) +static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; unsigned int tmo; @@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) if (ret) return ret; - list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); + list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); pr_debug("adding cmd %u on dev %s to ring space wait queue\n", tcmu_cmd->cmd_id, udev->name); return 0; @@ -999,7 +1003,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); - if (!list_empty(&udev->cmdr_queue)) + if (!list_empty(&udev->qfull_queue)) goto queue; mb = udev->mb_addr; @@ -1096,13 +1100,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); + list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); + set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags); + /* TODO: only if FLUSH and FUA? */ uio_event_notify(&udev->uio_info); return 0; queue: - if (add_to_cmdr_queue(tcmu_cmd)) { + if (add_to_qfull_queue(tcmu_cmd)) { *scsi_err = TCM_OUT_OF_RESOURCES; return -1; } @@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) goto out; + list_del_init(&cmd->queue_entry); + tcmu_cmd_reset_dbi_cur(cmd); if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { @@ -1194,9 +1203,29 @@ out: tcmu_free_cmd(cmd); } +static void tcmu_set_next_deadline(struct list_head *queue, + struct timer_list *timer) +{ + struct tcmu_cmd *tcmu_cmd, *tmp_cmd; + unsigned long deadline = 0; + + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) { + if (!time_after(jiffies, tcmu_cmd->deadline)) { + deadline = tcmu_cmd->deadline; + break; + } + } + + if (deadline) + mod_timer(timer, deadline); + else + del_timer(timer); +} + static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) { struct tcmu_mailbox *mb; + struct tcmu_cmd *cmd; int handled = 0; if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { @@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; - struct tcmu_cmd *cmd; tcmu_flush_dcache_range(entry, sizeof(*entry)); @@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) /* no more pending commands */ del_timer(&udev->cmd_timer); - if (list_empty(&udev->cmdr_queue)) { + if (list_empty(&udev->qfull_queue)) { /* * no more pending or waiting commands so try to * reclaim blocks if needed. 
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) tcmu_global_max_blocks) schedule_delayed_work(&tcmu_unmap_work, 0); } + } else if (udev->cmd_time_out) { + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); } return handled; @@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) if (!time_after(jiffies, cmd->deadline)) return 0; - is_running = list_empty(&cmd->cmdr_queue_entry); + is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); se_cmd = cmd->se_cmd; if (is_running) { @@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) */ scsi_status = SAM_STAT_CHECK_CONDITION; } else { - list_del_init(&cmd->cmdr_queue_entry); - idr_remove(&udev->commands, id); tcmu_free_cmd(cmd); scsi_status = SAM_STAT_TASK_SET_FULL; } + list_del_init(&cmd->queue_entry); pr_debug("Timing out cmd %u on dev %s that is %s.\n", id, udev->name, is_running ? "inflight" : "queued"); @@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&udev->node); INIT_LIST_HEAD(&udev->timedout_entry); - INIT_LIST_HEAD(&udev->cmdr_queue); + INIT_LIST_HEAD(&udev->qfull_queue); + INIT_LIST_HEAD(&udev->inflight_queue); idr_init(&udev->commands); timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); @@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) return &udev->se_dev; } -static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) +static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) { struct tcmu_cmd *tcmu_cmd, *tmp_cmd; LIST_HEAD(cmds); @@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) sense_reason_t scsi_ret; int ret; - if (list_empty(&udev->cmdr_queue)) + if (list_empty(&udev->qfull_queue)) return true; pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); - list_splice_init(&udev->cmdr_queue, &cmds); + list_splice_init(&udev->qfull_queue, &cmds); - list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { - list_del_init(&tcmu_cmd->cmdr_queue_entry); + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { + list_del_init(&tcmu_cmd->queue_entry); pr_debug("removing cmd %u on dev %s from queue\n", tcmu_cmd->cmd_id, udev->name); @@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) * cmd was requeued, so just put all cmds back in * the queue */ - list_splice_tail(&cmds, &udev->cmdr_queue); + list_splice_tail(&cmds, &udev->qfull_queue); drained = false; - goto done; + break; } } - if (list_empty(&udev->cmdr_queue)) - del_timer(&udev->qfull_timer); -done: + + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); return drained; } @@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) mutex_lock(&udev->cmdr_lock); tcmu_handle_completions(udev); - run_cmdr_queue(udev, false); + run_qfull_queue(udev, false); mutex_unlock(&udev->cmdr_lock); return 0; @@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev) /* complete IO that has executed successfully */ tcmu_handle_completions(udev); /* fail IO waiting to be queued */ - run_cmdr_queue(udev, true); + run_qfull_queue(udev, true); unlock: mutex_unlock(&udev->cmdr_lock); @@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) mutex_lock(&udev->cmdr_lock); idr_for_each_entry(&udev->commands, cmd, i) { - if (!list_empty(&cmd->cmdr_queue_entry)) + if 
(!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) continue; pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", @@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) idr_remove(&udev->commands, i); if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + list_del_init(&cmd->queue_entry); if (err_level == 1) { /* * Userspace was not able to start the @@ -2666,6 +2696,10 @@ static void check_timedout_devices(void) mutex_lock(&udev->cmdr_lock); idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); + + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); + mutex_unlock(&udev->cmdr_lock); spin_lock_bh(&timed_out_udevs_lock); diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig index 0582bd12a239..0ca908d12750 100644 --- a/drivers/thermal/intel/int340x_thermal/Kconfig +++ b/drivers/thermal/intel/int340x_thermal/Kconfig @@ -4,7 +4,7 @@ config INT340X_THERMAL tristate "ACPI INT340X thermal drivers" - depends on X86 && ACPI + depends on X86 && ACPI && PCI select THERMAL_GOV_USER_SPACE select ACPI_THERMAL_REL select ACPI_FAN diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 8e10ab436d1f..344684f3e2e4 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs, struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc) { - struct virtio_scsi_ctrl_tmf_resp __user *resp; struct virtio_scsi_ctrl_tmf_resp rsp; + struct iov_iter iov_iter; int ret; pr_debug("%s\n", __func__); memset(&rsp, 0, sizeof(rsp)); rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; - resp = vq->iov[vc->out].iov_base; - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); - if (!ret) + + iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); + + ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); + if (likely(ret == sizeof(rsp))) vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); else pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); @@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc) { - struct virtio_scsi_ctrl_an_resp __user *resp; struct virtio_scsi_ctrl_an_resp rsp; + struct iov_iter iov_iter; int ret; pr_debug("%s\n", __func__); memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ rsp.response = VIRTIO_SCSI_S_OK; - resp = vq->iov[vc->out].iov_base; - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); - if (!ret) + + iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); + + ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); + if (likely(ret == sizeof(rsp))) vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); else pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index babbb32b9bf0..15a216cdd507 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1034,8 +1034,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, int type, ret; ret = copy_from_iter(&type, sizeof(type), from); - if (ret != sizeof(type)) + if (ret != sizeof(type)) { + ret = -EINVAL; goto done; + } switch (type) { case VHOST_IOTLB_MSG: @@ -1054,8 +1056,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, iov_iter_advance(from, offset); ret = copy_from_iter(&msg, sizeof(msg), from); - if (ret != sizeof(msg)) + if (ret != sizeof(msg)) { + ret = -EINVAL; goto done; + } if (vhost_process_iotlb_msg(dev, &msg)) { ret = -EFAULT; goto done; 
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 8976190b6c1f..bfa1360ec750 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -510,6 +510,13 @@ static int __init fb_console_setup(char *this_opt) continue; } #endif + + if (!strncmp(options, "logo-pos:", 9)) { + options += 9; + if (!strcmp(options, "center")) + fb_center_logo = true; + continue; + } } return 1; } diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 558ed2ed3124..cb43a2258c51 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -53,6 +53,9 @@ EXPORT_SYMBOL(registered_fb); int num_registered_fb __read_mostly; EXPORT_SYMBOL(num_registered_fb); +bool fb_center_logo __read_mostly; +EXPORT_SYMBOL(fb_center_logo); + static struct fb_info *get_fb_info(unsigned int idx) { struct fb_info *fb_info; @@ -506,8 +509,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, fb_set_logo(info, logo, logo_new, fb_logo.depth); } -#ifdef CONFIG_FB_LOGO_CENTER - { + if (fb_center_logo) { int xres = info->var.xres; int yres = info->var.yres; @@ -520,11 +522,11 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, --n; image.dx = (xres - n * (logo->width + 8) - 8) / 2; image.dy = y ?: (yres - logo->height) / 2; + } else { + image.dx = 0; + image.dy = y; } -#else - image.dx = 0; - image.dy = y; -#endif + image.width = logo->width; image.height = logo->height; @@ -684,9 +686,8 @@ int fb_prepare_logo(struct fb_info *info, int rotate) } height = fb_logo.logo->height; -#ifdef CONFIG_FB_LOGO_CENTER - height += (yres - fb_logo.logo->height) / 2; -#endif + if (fb_center_logo) + height += (yres - fb_logo.logo->height) / 2; return fb_prepare_extra_logos(info, height, yres); } diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 31f769d67195..057d3cdef92e 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index, } static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, - const char *name, unsigned long address) + unsigned long address) { struct offb_par *par = (struct offb_par *) info->par; - if (dp && !strncmp(name, "ATY,Rage128", 11)) { + if (of_node_name_prefix(dp, "ATY,Rage128")) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_r128; - } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) - || !strncmp(name, "ATY,RageM3p12A", 14))) { + } else if (of_node_name_prefix(dp, "ATY,RageM3pA") || + of_node_name_prefix(dp, "ATY,RageM3p12A")) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_M3A; - } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { + } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_M3B; - } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { + } else if (of_node_name_prefix(dp, "ATY,Rage6")) { par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_radeon; - } else if (!strncmp(name, "ATY,", 4)) { + } else if (of_node_name_prefix(dp, "ATY,")) { unsigned long base = address & 0xff000000UL; par->cmap_adr = ioremap(base + 0x7ff000, 0x1000) + 0xcc0; @@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); if (par->cmap_adr) 
par->cmap_type = cmap_gxt2000; - } else if (dp && !strncmp(name, "vga,Display-", 12)) { + } else if (of_node_name_prefix(dp, "vga,Display-")) { /* Look for AVIVO initialized by SLOF */ struct device_node *pciparent = of_get_parent(dp); const u32 *vid, *did; @@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name, par->cmap_type = cmap_unknown; if (depth == 8) - offb_init_palette_hacks(info, dp, name, address); + offb_init_palette_hacks(info, dp, address); else fix->visual = FB_VISUAL_TRUECOLOR; diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c index 53f93616c671..8e23160ec59f 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c @@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) int r = 0; + memset(&p, 0, sizeof(p)); + switch (cmd) { case OMAPFB_SYNC_GFX: DBG("ioctl SYNC_GFX\n"); diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig index 1e972c4e88b1..d1f6196c8b9a 100644 --- a/drivers/video/logo/Kconfig +++ b/drivers/video/logo/Kconfig @@ -10,15 +10,6 @@ menuconfig LOGO if LOGO -config FB_LOGO_CENTER - bool "Center the logo" - depends on FB=y - help - When this option is selected, the bootup logo is centered both - horizontally and vertically. If more than one logo is displayed - due to multiple CPUs, the collected line of logos is centered - as a whole. - config FB_LOGO_EXTRA bool depends on FB=y diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 728ecd1eea30..fb12fe205f86 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -61,6 +61,10 @@ enum virtio_balloon_vq { VIRTIO_BALLOON_VQ_MAX }; +enum virtio_balloon_config_read { + VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0, +}; + struct virtio_balloon { struct virtio_device *vdev; struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; @@ -77,14 +81,20 @@ struct virtio_balloon { /* Prevent updating balloon when it is being canceled. */ spinlock_t stop_update_lock; bool stop_update; + /* Bitmap to indicate if reading the related config fields are needed */ + unsigned long config_read_bitmap; /* The list of allocated free pages, waiting to be given back to mm */ struct list_head free_page_list; spinlock_t free_page_list_lock; /* The number of free page blocks on the above list */ unsigned long num_free_page_blocks; - /* The cmd id received from host */ - u32 cmd_id_received; + /* + * The cmd id received from host. + * Read it via virtio_balloon_cmd_id_received to get the latest value + * sent from host. + */ + u32 cmd_id_received_cache; /* The cmd id that is actively in use */ __virtio32 cmd_id_active; /* Buffer to store the stop sign */ @@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb, return num_returned; } +static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb) +{ + if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + return; + + /* No need to queue the work if the bit was already set. 
*/ + if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, + &vb->config_read_bitmap)) + return; + + queue_work(vb->balloon_wq, &vb->report_free_page_work); +} + static void virtballoon_changed(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; unsigned long flags; - s64 diff = towards_target(vb); - - if (diff) { - spin_lock_irqsave(&vb->stop_update_lock, flags); - if (!vb->stop_update) - queue_work(system_freezable_wq, - &vb->update_balloon_size_work); - spin_unlock_irqrestore(&vb->stop_update_lock, flags); - } - if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { - virtio_cread(vdev, struct virtio_balloon_config, - free_page_report_cmd_id, &vb->cmd_id_received); - if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { - /* Pass ULONG_MAX to give back all the free pages */ - return_free_pages_to_mm(vb, ULONG_MAX); - } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && - vb->cmd_id_received != - virtio32_to_cpu(vdev, vb->cmd_id_active)) { - spin_lock_irqsave(&vb->stop_update_lock, flags); - if (!vb->stop_update) { - queue_work(vb->balloon_wq, - &vb->report_free_page_work); - } - spin_unlock_irqrestore(&vb->stop_update_lock, flags); - } + spin_lock_irqsave(&vb->stop_update_lock, flags); + if (!vb->stop_update) { + queue_work(system_freezable_wq, + &vb->update_balloon_size_work); + virtio_balloon_queue_free_page_work(vb); } + spin_unlock_irqrestore(&vb->stop_update_lock, flags); } static void update_balloon_size(struct virtio_balloon *vb) @@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb) return 0; } +static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) +{ + if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, + &vb->config_read_bitmap)) + virtio_cread(vb->vdev, struct virtio_balloon_config, + free_page_report_cmd_id, + &vb->cmd_id_received_cache); + + return vb->cmd_id_received_cache; +} + static int send_cmd_id_start(struct virtio_balloon *vb) { struct scatterlist sg; @@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb) while (virtqueue_get_buf(vq, &unused)) ; - vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); + vb->cmd_id_active = virtio32_to_cpu(vb->vdev, + virtio_balloon_cmd_id_received(vb)); sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); if (!err) @@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb) * stop the reporting. */ cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); - if (cmd_id_active != vb->cmd_id_received) + if (unlikely(cmd_id_active != + virtio_balloon_cmd_id_received(vb))) break; /* @@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb) return 0; } -static void report_free_page_func(struct work_struct *work) +static void virtio_balloon_report_free_page(struct virtio_balloon *vb) { int err; - struct virtio_balloon *vb = container_of(work, struct virtio_balloon, - report_free_page_work); struct device *dev = &vb->vdev->dev; /* Start by sending the received cmd id to host with an outbuf. 
*/ @@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work) dev_err(dev, "Failed to send a stop id, err = %d\n", err); } +static void report_free_page_func(struct work_struct *work) +{ + struct virtio_balloon *vb = container_of(work, struct virtio_balloon, + report_free_page_work); + u32 cmd_id_received; + + cmd_id_received = virtio_balloon_cmd_id_received(vb); + if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { + /* Pass ULONG_MAX to give back all the free pages */ + return_free_pages_to_mm(vb, ULONG_MAX); + } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && + cmd_id_received != + virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) { + virtio_balloon_report_free_page(vb); + } +} + #ifdef CONFIG_BALLOON_COMPACTION /* * virtballoon_migratepage - perform the balloon page migration on behalf of @@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev) goto out_del_vqs; } INIT_WORK(&vb->report_free_page_work, report_free_page_func); - vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; + vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP; vb->cmd_id_active = cpu_to_virtio32(vb->vdev, VIRTIO_BALLOON_CMD_ID_STOP); vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 4cd9ea5c75be..d9dd0f789279 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); unsigned int irq = platform_get_irq(vm_dev->pdev, 0); - int i, err; + int i, err, queue_idx = 0; err = request_irq(irq, vm_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vm_dev); @@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return err; for (i = 0; i < nvqs; ++i) { - vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], + if (!names[i]) { + vqs[i] = NULL; + continue; + } + + vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i], ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) { vm_del_vqs(vdev); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 465a6f5142cc..d0584c040c60 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, { struct virtio_pci_device *vp_dev = to_vp_device(vdev); u16 msix_vec; - int i, err, nvectors, allocated_vectors; + int i, err, nvectors, allocated_vectors, queue_idx = 0; vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); if (!vp_dev->vqs) @@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, msix_vec = allocated_vectors++; else msix_vec = VP_MSIX_VQ_VECTOR; - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], ctx ? ctx[i] : false, msix_vec); if (IS_ERR(vqs[i])) { @@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, const char * const names[], const bool *ctx) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - int i, err; + int i, err, queue_idx = 0; vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); if (!vp_dev->vqs) @@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, vqs[i] = NULL; continue; } - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], ctx ? 
ctx[i] : false, VIRTIO_MSI_NO_VECTOR); if (IS_ERR(vqs[i])) { diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c index 5c4a764717c4..81208cd3f4ec 100644 --- a/drivers/watchdog/mt7621_wdt.c +++ b/drivers/watchdog/mt7621_wdt.c @@ -17,6 +17,7 @@ #include <linux/watchdog.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> +#include <linux/mod_devicetable.h> #include <asm/mach-ralink/ralink_regs.h> diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 98967f0a7d10..db7c57d82cfd 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c @@ -18,6 +18,7 @@ #include <linux/watchdog.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> +#include <linux/mod_devicetable.h> #include <asm/mach-ralink/ralink_regs.h> diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c index 0d3a0fbbd7a5..52941207a12a 100644 --- a/drivers/watchdog/tqmx86_wdt.c +++ b/drivers/watchdog/tqmx86_wdt.c @@ -79,13 +79,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_IO, 0); - if (IS_ERR(res)) - return PTR_ERR(res); + if (!res) + return -ENODEV; priv->io_base = devm_ioport_map(&pdev->dev, res->start, resource_size(res)); - if (IS_ERR(priv->io_base)) - return PTR_ERR(priv->io_base); + if (!priv->io_base) + return -ENOMEM; watchdog_set_drvdata(&priv->wdd, priv); diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 93194f3e7540..117e76b2f939 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -1650,7 +1650,7 @@ void xen_callback_vector(void) xen_have_vector_callback = 0; return; } - pr_info("Xen HVM callback vector for event delivery is enabled\n"); + pr_info_once("Xen HVM callback vector for event delivery is enabled\n"); alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, xen_hvm_callback_vector); } diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 2e5d845b5091..7aa64d1b119c 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque) /* write the data, then modify the indexes */ virt_wmb(); - if (ret < 0) + if (ret < 0) { + atomic_set(&map->read, 0); intf->in_error = ret; - else + } else intf->in_prod = prod + ret; /* update the indexes, then notify the other end */ virt_wmb(); @@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev, static void pvcalls_sk_state_change(struct sock *sock) { struct sock_mapping *map = sock->sk_user_data; - struct pvcalls_data_intf *intf; if (map == NULL) return; - intf = map->ring; - intf->in_error = -ENOTCONN; + atomic_inc(&map->read); notify_remote_via_irq(map->irq); } diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 77224d8f3e6f..8a249c95c193 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -31,6 +31,12 @@ #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) #define PVCALLS_FRONT_MAX_SPIN 5000 +static struct proto pvcalls_proto = { + .name = "PVCalls", + .owner = THIS_MODULE, + .obj_size = sizeof(struct sock), +}; + struct pvcalls_bedata { struct xen_pvcalls_front_ring ring; grant_ref_t ref; @@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock) return ret; } +static void free_active_ring(struct sock_mapping *map) +{ + if (!map->active.ring) + return; + + free_pages((unsigned long)map->active.data.in, + 
map->active.ring->ring_order); + free_page((unsigned long)map->active.ring); +} + +static int alloc_active_ring(struct sock_mapping *map) +{ + void *bytes; + + map->active.ring = (struct pvcalls_data_intf *) + get_zeroed_page(GFP_KERNEL); + if (!map->active.ring) + goto out; + + map->active.ring->ring_order = PVCALLS_RING_ORDER; + bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + PVCALLS_RING_ORDER); + if (!bytes) + goto out; + + map->active.data.in = bytes; + map->active.data.out = bytes + + XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); + + return 0; + +out: + free_active_ring(map); + return -ENOMEM; +} + static int create_active(struct sock_mapping *map, int *evtchn) { void *bytes; @@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn) *evtchn = -1; init_waitqueue_head(&map->active.inflight_conn_req); - map->active.ring = (struct pvcalls_data_intf *) - __get_free_page(GFP_KERNEL | __GFP_ZERO); - if (map->active.ring == NULL) - goto out_error; - map->active.ring->ring_order = PVCALLS_RING_ORDER; - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - PVCALLS_RING_ORDER); - if (bytes == NULL) - goto out_error; + bytes = map->active.data.in; for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) map->active.ring->ref[i] = gnttab_grant_foreign_access( pvcalls_front_dev->otherend_id, @@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn) pvcalls_front_dev->otherend_id, pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); - map->active.data.in = bytes; - map->active.data.out = bytes + - XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); - ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); if (ret) goto out_error; @@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn) out_error: if (*evtchn >= 0) xenbus_free_evtchn(pvcalls_front_dev, *evtchn); - free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER); - free_page((unsigned long)map->active.ring); return ret; } @@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr, return PTR_ERR(map); bedata = dev_get_drvdata(&pvcalls_front_dev->dev); + ret = alloc_active_ring(map); + if (ret < 0) { + pvcalls_exit_sock(sock); + return ret; + } spin_lock(&bedata->socket_lock); ret = get_request(bedata, &req_id); if (ret < 0) { spin_unlock(&bedata->socket_lock); + free_active_ring(map); pvcalls_exit_sock(sock); return ret; } ret = create_active(map, &evtchn); if (ret < 0) { spin_unlock(&bedata->socket_lock); + free_active_ring(map); pvcalls_exit_sock(sock); return ret; } @@ -469,8 +504,10 @@ static int __write_ring(struct pvcalls_data_intf *intf, virt_mb(); size = pvcalls_queued(prod, cons, array_size); - if (size >= array_size) + if (size > array_size) return -EINVAL; + if (size == array_size) + return 0; if (len > array_size - size) len = array_size - size; @@ -560,15 +597,13 @@ static int __read_ring(struct pvcalls_data_intf *intf, error = intf->in_error; /* get pointers before reading from the ring */ virt_rmb(); - if (error < 0) - return error; size = pvcalls_queued(prod, cons, array_size); masked_prod = pvcalls_mask(prod, array_size); masked_cons = pvcalls_mask(cons, array_size); if (size == 0) - return 0; + return error ?: size; if (len > size) len = size; @@ -780,25 +815,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) } } - spin_lock(&bedata->socket_lock); - ret = get_request(bedata, &req_id); - if (ret < 0) { + map2 = kzalloc(sizeof(*map2), GFP_KERNEL); + if (map2 == NULL) { 
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); - spin_unlock(&bedata->socket_lock); + pvcalls_exit_sock(sock); + return -ENOMEM; + } + ret = alloc_active_ring(map2); + if (ret < 0) { + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, + (void *)&map->passive.flags); + kfree(map2); pvcalls_exit_sock(sock); return ret; } - map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); - if (map2 == NULL) { + spin_lock(&bedata->socket_lock); + ret = get_request(bedata, &req_id); + if (ret < 0) { clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); spin_unlock(&bedata->socket_lock); + free_active_ring(map2); + kfree(map2); pvcalls_exit_sock(sock); - return -ENOMEM; + return ret; } + ret = create_active(map2, &evtchn); if (ret < 0) { + free_active_ring(map2); kfree(map2); clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); @@ -839,7 +885,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) received: map2->sock = newsock; - newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL); + newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false); if (!newsock->sk) { bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; map->passive.inflight_req_id = PVCALLS_INVALID_ID; @@ -1032,8 +1078,8 @@ int pvcalls_front_release(struct socket *sock) spin_lock(&bedata->socket_lock); list_del(&map->list); spin_unlock(&bedata->socket_lock); - if (READ_ONCE(map->passive.inflight_req_id) != - PVCALLS_INVALID_ID) { + if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID && + READ_ONCE(map->passive.inflight_req_id) != 0) { pvcalls_front_free_map(bedata, map->passive.accept_map); } diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 0568fd986821..e432bd27a2e7 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -208,7 +208,7 @@ again: /* The new front of the queue now owns the state variables. */ next = list_entry(vnode->pending_locks.next, struct file_lock, fl_u.afs.link); - vnode->lock_key = afs_file_key(next->fl_file); + vnode->lock_key = key_get(afs_file_key(next->fl_file)); vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; goto again; @@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl) /* The new front of the queue now owns the state variables. */ next = list_entry(vnode->pending_locks.next, struct file_lock, fl_u.afs.link); - vnode->lock_key = afs_file_key(next->fl_file); + vnode->lock_key = key_get(afs_file_key(next->fl_file)); vnode->lock_type = (next->fl_type == F_RDLCK) ? 
AFS_LOCK_READ : AFS_LOCK_WRITE; vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; afs_lock_may_be_available(vnode); diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 6b17d3620414..1a4ce07fb406 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { valid = true; } else { - vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; vnode->cb_v_break = vnode->volume->cb_v_break; valid = false; } @@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode) #endif afs_put_permits(rcu_access_pointer(vnode->permit_cache)); + key_put(vnode->lock_key); + vnode->lock_key = NULL; _leave(""); } diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h index 07bc10f076aa..d443e2bfa094 100644 --- a/fs/afs/protocol_yfs.h +++ b/fs/afs/protocol_yfs.h @@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus { struct yfs_xdr_u64 max_quota; struct yfs_xdr_u64 file_quota; } __packed; + +enum yfs_lock_type { + yfs_LockNone = -1, + yfs_LockRead = 0, + yfs_LockWrite = 1, + yfs_LockExtend = 2, + yfs_LockRelease = 3, + yfs_LockMandatoryRead = 0x100, + yfs_LockMandatoryWrite = 0x101, + yfs_LockMandatoryExtend = 0x102, +}; diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index a7b44863d502..2c588f9bbbda 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls; static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); +static void afs_delete_async_call(struct work_struct *); static void afs_process_async_call(struct work_struct *); static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); @@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call) } } +static struct afs_call *afs_get_call(struct afs_call *call, + enum afs_call_trace why) +{ + int u = atomic_inc_return(&call->usage); + + trace_afs_call(call, why, u, + atomic_read(&call->net->nr_outstanding_calls), + __builtin_return_address(0)); + return call; +} + /* * Queue the call for actual work. */ static void afs_queue_call_work(struct afs_call *call) { if (call->type->work) { - int u = atomic_inc_return(&call->usage); - - trace_afs_call(call, afs_call_trace_work, u, - atomic_read(&call->net->nr_outstanding_calls), - __builtin_return_address(0)); - INIT_WORK(&call->work, call->type->work); + afs_get_call(call, afs_call_trace_work); if (!queue_work(afs_wq, &call->work)) afs_put_call(call); } @@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, } } + /* If the call is going to be asynchronous, we need an extra ref for + * the call to hold itself so the caller need not hang on to its ref. + */ + if (call->async) + afs_get_call(call, afs_call_trace_get); + /* create a call */ rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, (unsigned long)call, @@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, goto error_do_abort; } - /* at this point, an async call may no longer exist as it may have - * already completed */ - if (call->async) + /* Note that at this point, we may have received the reply or an abort + * - and an asynchronous call may already have completed. 
+ */ + if (call->async) { + afs_put_call(call); return -EINPROGRESS; + } return afs_wait_for_call_to_complete(call, ac); error_do_abort: - call->state = AFS_CALL_COMPLETE; if (ret != -ECONNABORTED) { rxrpc_kernel_abort_call(call->net->socket, rxcall, RX_USER_ABORT, ret, "KSD"); @@ -463,8 +478,24 @@ error_do_abort: error_kill_call: if (call->type->done) call->type->done(call); - afs_put_call(call); + + /* We need to dispose of the extra ref we grabbed for an async call. + * The call, however, might be queued on afs_async_calls and we need to + * make sure we don't get any more notifications that might requeue it. + */ + if (call->rxcall) { + rxrpc_kernel_end_call(call->net->socket, call->rxcall); + call->rxcall = NULL; + } + if (call->async) { + if (cancel_work_sync(&call->async_work)) + afs_put_call(call); + afs_put_call(call); + } + ac->error = ret; + call->state = AFS_CALL_COMPLETE; + afs_put_call(call); _leave(" = %d", ret); return ret; } diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 95d0761cdb34..155dc14caef9 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c @@ -42,9 +42,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, if (vldb->fs_mask[i] & type_mask) nr_servers++; - slist = kzalloc(sizeof(struct afs_server_list) + - sizeof(struct afs_server_entry) * nr_servers, - GFP_KERNEL); + slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL); if (!slist) goto error; diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 12658c1363ae..5aa57929e8c2 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc, bp = xdr_encode_YFSFid(bp, &vnode->fid); bp = xdr_encode_string(bp, name, namesz); bp = xdr_encode_YFSStoreStatus_mode(bp, mode); - bp = xdr_encode_u32(bp, 0); /* ViceLockType */ + bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */ yfs_check_req(call, bp); afs_use_fs_server(call, fc->cbi); diff --git a/fs/block_dev.c b/fs/block_dev.c index c546cdce77e6..58a4c1217fa8 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev) } EXPORT_SYMBOL(invalidate_bdev); +static void set_init_blocksize(struct block_device *bdev) +{ + unsigned bsize = bdev_logical_block_size(bdev); + loff_t size = i_size_read(bdev->bd_inode); + + while (bsize < PAGE_SIZE) { + if (size & bsize) + break; + bsize <<= 1; + } + bdev->bd_block_size = bsize; + bdev->bd_inode->i_blkbits = blksize_bits(bsize); +} + int set_blocksize(struct block_device *bdev, int size) { /* Size must be a power of two, and between 512 and PAGE_SIZE */ @@ -1431,18 +1445,9 @@ EXPORT_SYMBOL(check_disk_change); void bd_set_size(struct block_device *bdev, loff_t size) { - unsigned bsize = bdev_logical_block_size(bdev); - inode_lock(bdev->bd_inode); i_size_write(bdev->bd_inode, size); inode_unlock(bdev->bd_inode); - while (bsize < PAGE_SIZE) { - if (size & bsize) - break; - bsize <<= 1; - } - bdev->bd_block_size = bsize; - bdev->bd_inode->i_blkbits = blksize_bits(bsize); } EXPORT_SYMBOL(bd_set_size); @@ -1519,8 +1524,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) } } - if (!ret) + if (!ret) { bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); + set_init_blocksize(bdev); + } /* * If the device is invalidated, rescan partition @@ -1555,6 +1562,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) goto out_clear; } bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); + 
set_init_blocksize(bdev); } if (bdev->bd_bdi == &noop_backing_dev_info) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0a68cf7032f5..7a2a2621f0d9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -35,6 +35,7 @@ struct btrfs_trans_handle; struct btrfs_transaction; struct btrfs_pending_snapshot; +struct btrfs_delayed_ref_root; extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; @@ -786,6 +787,9 @@ enum { * main phase. The fs_info::balance_ctl is initialized. */ BTRFS_FS_BALANCE_RUNNING, + + /* Indicate that the cleaner thread is awake and doing something. */ + BTRFS_FS_CLEANER_RUNNING, }; struct btrfs_fs_info { @@ -2661,6 +2665,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count); int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, unsigned long count, u64 transid, int wait); +void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_head *head); int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8da2f380d3c0..6a2a2a951705 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1682,6 +1682,8 @@ static int cleaner_kthread(void *arg) while (1) { again = 0; + set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); + /* Make the cleaner go to sleep early. */ if (btrfs_need_cleaner_sleep(fs_info)) goto sleep; @@ -1728,6 +1730,7 @@ static int cleaner_kthread(void *arg) */ btrfs_delete_unused_bgs(fs_info); sleep: + clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); if (kthread_should_park()) kthread_parkme(); if (kthread_should_stop()) @@ -4201,6 +4204,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) spin_lock(&fs_info->ordered_root_lock); } spin_unlock(&fs_info->ordered_root_lock); + + /* + * We need this here because if we've been flipped read-only we won't + * get sync() from the umount, so we need to make sure any ordered + * extents that haven't had their dirty pages IO start writeout yet + * actually get run and error out properly. + */ + btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); } static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, @@ -4265,6 +4276,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, if (pin_bytes) btrfs_pin_extent(fs_info, head->bytenr, head->num_bytes, 1); + btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); btrfs_put_delayed_ref_head(head); cond_resched(); spin_lock(&delayed_refs->lock); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b15afeae16df..d81035b7ea7d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2456,12 +2456,10 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans, return ret ? ret : 1; } -static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans, - struct btrfs_delayed_ref_head *head) +void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_head *head) { - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_delayed_ref_root *delayed_refs = - &trans->transaction->delayed_refs; int nr_items = 1; /* Dropping this ref head update. 
*/ if (head->total_ref_mod < 0) { @@ -2544,7 +2542,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, } } - cleanup_ref_head_accounting(trans, head); + btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); trace_run_delayed_ref_head(fs_info, head, 0); btrfs_delayed_ref_unlock(head); @@ -4954,6 +4952,15 @@ static void flush_space(struct btrfs_fs_info *fs_info, ret = 0; break; case COMMIT_TRANS: + /* + * If we have pending delayed iputs then we could free up a + * bunch of pinned space, so make sure we run the iputs before + * we do our pinned bytes check below. + */ + mutex_lock(&fs_info->cleaner_delayed_iput_mutex); + btrfs_run_delayed_iputs(fs_info); + mutex_unlock(&fs_info->cleaner_delayed_iput_mutex); + ret = may_commit_transaction(fs_info, space_info); break; default: @@ -7188,7 +7195,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, if (head->must_insert_reserved) ret = 1; - cleanup_ref_head_accounting(trans, head); + btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); mutex_unlock(&head->mutex); btrfs_put_delayed_ref_head(head); return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 43eb4535319d..5c349667c761 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3129,9 +3129,6 @@ out: /* once for the tree */ btrfs_put_ordered_extent(ordered_extent); - /* Try to release some metadata so we don't get an OOM but don't wait */ - btrfs_btree_balance_dirty_nodelay(fs_info); - return ret; } @@ -3254,6 +3251,8 @@ void btrfs_add_delayed_iput(struct inode *inode) ASSERT(list_empty(&binode->delayed_iput)); list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); spin_unlock(&fs_info->delayed_iput_lock); + if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) + wake_up_process(fs_info->cleaner_kthread); } void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 46d691ba04bc..45b2322e092d 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -133,15 +133,9 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t count, unsigned int flags) { - ssize_t ret; - if (file_inode(file_in) == file_inode(file_out)) return -EINVAL; -retry: - ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); - if (ret == -EAGAIN) - goto retry; - return ret; + return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); } static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 96f7d32cd184..898c8321b343 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -128,7 +128,6 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id, struct pstore_record *record) { struct persistent_ram_zone *prz; - bool update = (record->type == PSTORE_TYPE_DMESG); /* Give up if we never existed or have hit the end. */ if (!przs) @@ -139,7 +138,7 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id, return NULL; /* Update old/shadowed buffer. 
*/ - if (update) + if (prz->type == PSTORE_TYPE_DMESG) persistent_ram_save_old(prz); if (!persistent_ram_old_size(prz)) @@ -711,18 +710,15 @@ static int ramoops_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ramoops_platform_data *pdata = dev->platform_data; + struct ramoops_platform_data pdata_local; struct ramoops_context *cxt = &oops_cxt; size_t dump_mem_sz; phys_addr_t paddr; int err = -EINVAL; if (dev_of_node(dev) && !pdata) { - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) { - pr_err("cannot allocate platform data buffer\n"); - err = -ENOMEM; - goto fail_out; - } + pdata = &pdata_local; + memset(pdata, 0, sizeof(*pdata)); err = ramoops_parse_dt(pdev, pdata); if (err < 0) diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h index ad6f55dabd6d..0f2e0fe45ca4 100644 --- a/include/dt-bindings/reset/amlogic,meson-axg-reset.h +++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h @@ -1,12 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* - * * Copyright (c) 2016 BayLibre, SAS. * Author: Neil Armstrong <narmstrong@baylibre.com> * * Copyright (c) 2017 Amlogic, inc. * Author: Yixun Lan <yixun.lan@amlogic.com> * - * SPDX-License-Identifier: (GPL-2.0+ OR BSD) */ #ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index 7cca5f859a90..f3c43519baa7 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h @@ -6,6 +6,7 @@ struct bcma_soc { struct bcma_bus bus; + struct device *dev; }; int __init bcma_host_soc_register(struct bcma_soc *soc); diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 39f668d5066b..333a6695a918 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -3,9 +3,8 @@ #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." #endif -/* Some compiler specific definitions are overwritten here - * for Clang compiler - */ +/* Compiler specific definitions for Clang compiler */ + #define uninitialized_var(x) x = *(&(x)) /* same as gcc, this was present in clang-2.6 so we can assume it works diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index dd8268f5f5f0..e8579412ad21 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -58,10 +58,6 @@ (typeof(ptr)) (__ptr + (off)); \ }) -/* Make the optimizer believe the variable can be manipulated arbitrarily. */ -#define OPTIMIZER_HIDE_VAR(var) \ - __asm__ ("" : "=r" (var) : "0" (var)) - /* * A trick to suppress uninitialized variable warning without generating any * code diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 517bd14e1222..b17f3cd18334 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -5,9 +5,7 @@ #ifdef __ECC -/* Some compiler specific definitions are overwritten here - * for Intel ECC compiler - */ +/* Compiler specific definitions for Intel ECC compiler */ #include <asm/intrinsics.h> diff --git a/include/linux/compiler.h b/include/linux/compiler.h index fc5004a4b07d..445348facea9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #endif #ifndef OPTIMIZER_HIDE_VAR -#define OPTIMIZER_HIDE_VAR(var) barrier() +/* Make the optimizer believe the variable can be manipulated arbitrarily. 
*/ +#define OPTIMIZER_HIDE_VAR(var) \ + __asm__ ("" : "=r" (var) : "0" (var)) #endif /* Not-quite-unique ID. */ diff --git a/include/linux/fb.h b/include/linux/fb.h index 7cdd31a69719..f52ef0ad6781 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info); extern struct fb_info *registered_fb[FB_MAX]; extern int num_registered_fb; +extern bool fb_center_logo; extern struct class *fb_class; #define for_each_registered_fb(i) \ diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 6756fea18b69..e44746de95cd 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) case ARPHRD_IPGRE: case ARPHRD_VOID: case ARPHRD_NONE: + case ARPHRD_RAWIP: return false; default: return true; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 5440f11b0907..7315977b64da 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -160,6 +160,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( } enum nvdimm_security_state { + NVDIMM_SECURITY_ERROR = -1, NVDIMM_SECURITY_DISABLED, NVDIMM_SECURITY_UNLOCKED, NVDIMM_SECURITY_LOCKED, diff --git a/include/linux/of.h b/include/linux/of.h index fe472e5195a9..e240992e5cb6 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -50,7 +50,6 @@ struct of_irq_controller; struct device_node { const char *name; - const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; diff --git a/include/linux/phy.h b/include/linux/phy.h index 55114657a577..ef20aeea10cc 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -469,8 +469,8 @@ struct phy_device { * only works for PHYs with IDs which match this field * name: The friendly name of this PHY type * phy_id_mask: Defines the important bits of the phy_id - * features: A list of features (speed, duplex, etc) supported - * by this PHY + * features: A mandatory list of features (speed, duplex, etc) + * supported by this PHY * flags: A bitfield defining certain other features this PHY * supports (like interrupts) * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 91c536a01b56..c2a1b7dbe4eb 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -764,6 +764,7 @@ struct qed_probe_params { u32 dp_module; u8 dp_level; bool is_vf; + bool recov_in_prog; }; #define QED_DRV_VER_STR_SIZE 12 @@ -810,6 +811,7 @@ struct qed_common_cb_ops { void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); void (*link_update)(void *dev, struct qed_link_output *link); + void (*schedule_recovery_handler)(void *dev); void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); void (*get_protocol_tlv_data)(void *dev, void *data); @@ -1058,6 +1060,24 @@ struct qed_common_ops { void __iomem *db_addr, void *db_data); /** + * @brief recovery_process - Trigger a recovery process + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*recovery_process)(struct qed_dev *cdev); + +/** + * @brief recovery_prolog - Execute the prolog operations of a recovery process + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*recovery_prolog)(struct qed_dev *cdev); + +/** * @brief update_drv_state - API to inform the change in the driver state. 
* * @param cdev diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 9904617a9730..e29d7199c10e 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -55,6 +55,16 @@ struct qede_rdma_event_work { enum qede_rdma_event event; }; +enum qede_rdma_probe_mode { + QEDE_RDMA_PROBE_NORMAL, + QEDE_RDMA_PROBE_RECOVERY, +}; + +enum qede_rdma_remove_mode { + QEDE_RDMA_REMOVE_NORMAL, + QEDE_RDMA_REMOVE_RECOVERY, +}; + struct qedr_driver { unsigned char name[32]; @@ -74,21 +84,24 @@ void qede_rdma_unregister_driver(struct qedr_driver *drv); bool qede_rdma_supported(struct qede_dev *dev); #if IS_ENABLED(CONFIG_QED_RDMA) -int qede_rdma_dev_add(struct qede_dev *dev); +int qede_rdma_dev_add(struct qede_dev *dev, enum qede_rdma_probe_mode mode); void qede_rdma_dev_event_open(struct qede_dev *dev); void qede_rdma_dev_event_close(struct qede_dev *dev); -void qede_rdma_dev_remove(struct qede_dev *dev); +void qede_rdma_dev_remove(struct qede_dev *dev, + enum qede_rdma_remove_mode mode); void qede_rdma_event_changeaddr(struct qede_dev *edr); #else -static inline int qede_rdma_dev_add(struct qede_dev *dev) +static inline int qede_rdma_dev_add(struct qede_dev *dev, + enum qede_rdma_probe_mode mode) { return 0; } static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {} static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {} -static inline void qede_rdma_dev_remove(struct qede_dev *dev) {} +static inline void qede_rdma_dev_remove(struct qede_dev *dev, + enum qede_rdma_remove_mode mode) {} static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {} #endif #endif diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 32baf8e26735..987b6491b946 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -12,6 +12,11 @@ struct irq_affinity; /** * virtio_config_ops - operations for configuring a virtio device + * Note: Do not assume that a transport implements all of the operations + * getting/setting a value as a simple read/write! Generally speaking, + * any of @get/@set, @get_status/@set_status, or @get_features/ + * @finalize_features are NOT safe to be called from an atomic + * context. * @get: read the value of a configuration field * vdev: the virtio_device * offset: the offset of the configuration field @@ -22,7 +27,7 @@ struct irq_affinity; * offset: the offset of the configuration field * buf: the buffer to read the field value from. * len: the length of the buffer - * @generation: config generation counter + * @generation: config generation counter (optional) * vdev: the virtio_device * Returns the config generation counter * @get_status: read the status byte @@ -48,17 +53,17 @@ struct irq_affinity; * @del_vqs: free virtqueues found by find_vqs(). * @get_features: get the array of feature bits for this device. * vdev: the virtio_device - * Returns the first 32 feature bits (all we currently need). + * Returns the first 64 feature bits (all we currently need). * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. * Returns 0 on success or error status - * @bus_name: return the bus name associated with the device + * @bus_name: return the bus name associated with the device (optional) * vdev: the virtio_device * This returns a pointer to the bus name a la pci_name from which * the caller can then copy. 
- * @set_vq_affinity: set the affinity for a virtqueue. + * @set_vq_affinity: set the affinity for a virtqueue (optional). * @get_vq_affinity: get the affinity for a virtqueue (optional). */ typedef void vq_callback_t(struct virtqueue *); diff --git a/include/net/ax25.h b/include/net/ax25.h index 3f9aea8087e3..8b7eb46ad72d 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt) void __ax25_put_route(ax25_route *ax25_rt); +extern rwlock_t ax25_route_lock; + +static inline void ax25_route_lock_use(void) +{ + read_lock(&ax25_route_lock); +} + +static inline void ax25_route_lock_unuse(void) +{ + read_unlock(&ax25_route_lock); +} + static inline void ax25_put_route(ax25_route *ax25_rt) { if (refcount_dec_and_test(&ax25_rt->refcount)) diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 33d291888ba9..e3f005eae1f7 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -25,6 +25,7 @@ enum afs_call_trace { afs_call_trace_alloc, afs_call_trace_free, + afs_call_trace_get, afs_call_trace_put, afs_call_trace_wake, afs_call_trace_work, @@ -159,6 +160,7 @@ enum afs_file_error { #define afs_call_traces \ EM(afs_call_trace_alloc, "ALLOC") \ EM(afs_call_trace_free, "FREE ") \ + EM(afs_call_trace_get, "GET ") \ EM(afs_call_trace_put, "PUT ") \ EM(afs_call_trace_wake, "WAKE ") \ E_(afs_call_trace_work, "WORK ") diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h index d13fd490b66d..6e73f0274e41 100644 --- a/include/uapi/rdma/vmw_pvrdma-abi.h +++ b/include/uapi/rdma/vmw_pvrdma-abi.h @@ -78,6 +78,7 @@ enum pvrdma_wr_opcode { PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, PVRDMA_WR_BIND_MW, PVRDMA_WR_REG_SIG_MR, + PVRDMA_WR_ERROR, }; enum pvrdma_wc_status { diff --git a/init/Kconfig b/init/Kconfig index d47cb77a220e..513fa544a134 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1124,6 +1124,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION bool "Dead code and data elimination (EXPERIMENTAL)" depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION depends on EXPERT + depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800) depends on $(cc-option,-ffunction-sections -fdata-sections) depends on $(ld-option,--gc-sections) help diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index a2f53642592b..befe570be5ba 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -467,7 +467,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) return kind_ops[BTF_INFO_KIND(t->info)]; } -bool btf_name_offset_valid(const struct btf *btf, u32 offset) +static bool btf_name_offset_valid(const struct btf *btf, u32 offset) { return BTF_STR_OFFSET_VALID(offset) && offset < btf->hdr.str_len; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 9425c2fb872f..ab612fe9862f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -718,6 +718,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_trace_printk: if (capable(CAP_SYS_ADMIN)) return bpf_get_trace_printk_proto(); + /* fall through */ default: return NULL; } diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index 99d243e1ad6e..52378d3e34b3 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c @@ -12,6 +12,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) { struct bpf_map *inner_map, *inner_map_meta; + u32 inner_map_meta_size; struct fd f; f = fdget(inner_map_ufd); @@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int 
inner_map_ufd) return ERR_PTR(-EINVAL); } - inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER); + inner_map_meta_size = sizeof(*inner_map_meta); + /* In some cases verifier needs to access beyond just base map. */ + if (inner_map->ops == &array_map_ops) + inner_map_meta_size = sizeof(struct bpf_array); + + inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); if (!inner_map_meta) { fdput(f); return ERR_PTR(-ENOMEM); @@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) inner_map_meta->key_size = inner_map->key_size; inner_map_meta->value_size = inner_map->value_size; inner_map_meta->map_flags = inner_map->map_flags; - inner_map_meta->ops = inner_map->ops; inner_map_meta->max_entries = inner_map->max_entries; + /* Misc members not needed in bpf_map_meta_equal() check. */ + inner_map_meta->ops = inner_map->ops; + if (inner_map->ops == &array_map_ops) { + inner_map_meta->unpriv_array = inner_map->unpriv_array; + container_of(inner_map_meta, struct bpf_array, map)->index_mask = + container_of(inner_map, struct bpf_array, map)->index_mask; + } + fdput(f); return inner_map_meta; } diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index d9e2483669d0..d43b14535827 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr, if (nhdr->n_type == BPF_BUILD_ID && nhdr->n_namesz == sizeof("GNU") && - nhdr->n_descsz == BPF_BUILD_ID_SIZE) { + nhdr->n_descsz > 0 && + nhdr->n_descsz <= BPF_BUILD_ID_SIZE) { memcpy(build_id, note_start + note_offs + ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), - BPF_BUILD_ID_SIZE); + nhdr->n_descsz); + memset(build_id + nhdr->n_descsz, 0, + BPF_BUILD_ID_SIZE - nhdr->n_descsz); return 0; } new_offs = note_offs + sizeof(Elf32_Nhdr) + @@ -311,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, for (i = 0; i < trace_nr; i++) { id_offs[i].status = BPF_STACK_BUILD_ID_IP; id_offs[i].ip = ips[i]; + memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); } return; } @@ -321,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, /* per entry fall back to ips */ id_offs[i].status = BPF_STACK_BUILD_ID_IP; id_offs[i].ip = ips[i]; + memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); continue; } id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index d6361776dc5c..1fb6fd68b9c7 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -378,6 +378,8 @@ void __init swiotlb_exit(void) memblock_free_late(io_tlb_start, PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); } + io_tlb_start = 0; + io_tlb_end = 0; io_tlb_nslabs = 0; max_segment = 0; } diff --git a/kernel/seccomp.c b/kernel/seccomp.c index d7f538847b84..e815781ed751 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -976,6 +976,9 @@ static int seccomp_notify_release(struct inode *inode, struct file *file) struct seccomp_filter *filter = file->private_data; struct seccomp_knotif *knotif; + if (!filter) + return 0; + mutex_lock(&filter->notify_lock); /* @@ -1300,6 +1303,7 @@ out: out_put_fd: if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { if (ret < 0) { + listener_f->private_data = NULL; fput(listener_f); put_unused_fd(listener); } else { diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c index 14436f4ca6bd..30e0f9770f88 100644 --- a/lib/int_sqrt.c +++ b/lib/int_sqrt.c @@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x) if (x <= ULONG_MAX) return int_sqrt((unsigned long) x); - m = 1ULL << 
(fls64(x) & ~1ULL); + m = 1ULL << ((fls64(x) - 1) & ~1ULL); while (m != 0) { b = y + m; y >>= 1; diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 70417e9b932d..314bbc8010fb 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c @@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) dst = (ax25_address *)(bp + 1); src = (ax25_address *)(bp + 8); + ax25_route_lock_use(); route = ax25_get_route(dst, NULL); if (route) { digipeat = route->digipeat; @@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) ax25_queue_xmit(skb, dev); put: - if (route) - ax25_put_route(route); + ax25_route_lock_unuse(); return NETDEV_TX_OK; } diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index a0eff323af12..66f74c85cf6b 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -40,7 +40,7 @@ #include <linux/export.h> static ax25_route *ax25_route_list; -static DEFINE_RWLOCK(ax25_route_lock); +DEFINE_RWLOCK(ax25_route_lock); void ax25_rt_device_down(struct net_device *dev) { @@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = { * Find AX.25 route * * Only routes with a reference count of zero can be destroyed. + * Must be called with ax25_route_lock read locked. */ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) { @@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) ax25_route *ax25_def_rt = NULL; ax25_route *ax25_rt; - read_lock(&ax25_route_lock); /* * Bind to the physical interface we heard them on, or the default * route if none is found; */ @@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) if (ax25_spe_rt != NULL) ax25_rt = ax25_spe_rt; - if (ax25_rt != NULL) - ax25_hold_route(ax25_rt); - - read_unlock(&ax25_route_lock); - return ax25_rt; } @@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) ax25_route *ax25_rt; int err = 0; - if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL) + ax25_route_lock_use(); + ax25_rt = ax25_get_route(addr, NULL); + if (!ax25_rt) { + ax25_route_lock_unuse(); return -EHOSTUNREACH; - + } if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { err = -EHOSTUNREACH; goto put; @@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) } put: - ax25_put_route(ax25_rt); - + ax25_route_lock_unuse(); return err; } diff --git a/net/can/bcm.c b/net/can/bcm.c index 0af8f0db892a..79bb8afa9c0c 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -67,6 +67,9 @@ */ #define MAX_NFRAMES 256 +/* limit timers to 400 days for sending/timeouts */ +#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60) + /* use of last_frames[index].flags */ #define RX_RECV 0x40 /* received data for this element */ #define RX_THR 0x80 /* element not been sent due to throttle feature */ @@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); } +/* check limitations for timeval provided by user */ +static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head) +{ + if ((msg_head->ival1.tv_sec < 0) || + (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) || + (msg_head->ival1.tv_usec < 0) || + (msg_head->ival1.tv_usec >= USEC_PER_SEC) || + (msg_head->ival2.tv_sec < 0) || + (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) || + (msg_head->ival2.tv_usec < 0) || + (msg_head->ival2.tv_usec >= USEC_PER_SEC)) + return true; + + return false; +} + #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU) #define OPSIZ sizeof(struct bcm_op) #define MHSIZ sizeof(struct bcm_msg_head)
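The new bound exists because these intervals feed bcm_timeval_to_ktime() just above: ktime_t is a signed 64-bit nanosecond count, so a large unchecked tv_sec or tv_usec can overflow the multiplication inside the conversion, and a tv_usec of a second or more is malformed to begin with. A standalone sketch of the same validity rule for a single interval (hypothetical userspace code, with the struct reduced to explicit 64-bit fields):

#include <stdbool.h>
#include <stdint.h>

#define USEC_PER_SEC      1000000LL
/* 400-day cap, taken from the patch above. */
#define BCM_TIMER_SEC_MAX (400LL * 24 * 60 * 60)

struct bcm_timeval { int64_t tv_sec; int64_t tv_usec; };

/* Mirrors one leg of bcm_is_invalid_tv(): both fields non-negative,
 * the usec part below one second, the sec part within the cap. */
static bool interval_is_invalid(const struct bcm_timeval *tv)
{
	return tv->tv_sec < 0 || tv->tv_sec > BCM_TIMER_SEC_MAX ||
	       tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC;
}

int main(void)
{
	struct bcm_timeval ok = { 5, 0 }, bad = { 1, USEC_PER_SEC };

	/* Exit status 0 when the check behaves as described. */
	return interval_is_invalid(&ok) || !interval_is_invalid(&bad);
}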
@@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) return -EINVAL; + /* check timeval limitations */ + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) + return -EINVAL; + /* check the given can_id */ op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); if (op) { @@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, (!(msg_head->can_id & CAN_RTR_FLAG)))) return -EINVAL; + /* check timeval limitations */ + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) + return -EINVAL; + /* check the given can_id */ op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); if (op) { diff --git a/net/core/filter.c b/net/core/filter.c index 2b3b436ef545..7559d6835ecb 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2020,18 +2020,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, u32 flags) { - /* skb->mac_len is not set on normal egress */ - unsigned int mlen = skb->network_header - skb->mac_header; + unsigned int mlen = skb_network_offset(skb); - __skb_pull(skb, mlen); + if (mlen) { + __skb_pull(skb, mlen); - /* At ingress, the mac header has already been pulled once. - * At egress, skb_pospull_rcsum has to be done in case that - * the skb is originated from ingress (i.e. a forwarded skb) - * to ensure that rcsum starts at net header. - */ - if (!skb_at_tc_ingress(skb)) - skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); + /* At ingress, the mac header has already been pulled once. + * At egress, skb_postpull_rcsum has to be done in case that + * the skb is originated from ingress (i.e. a forwarded skb) + * to ensure that rcsum starts at net header. + */ + if (!skb_at_tc_ingress(skb)) + skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); + } skb_pop_mac_header(skb); skb_reset_mac_len(skb); return flags & BPF_F_INGRESS ? @@ -4119,6 +4120,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); break; case SO_MAX_PACING_RATE: /* 32bit version */ + if (val != ~0U) + cmpxchg(&sk->sk_pacing_status, + SK_PACING_NONE, + SK_PACING_NEEDED); sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; sk->sk_pacing_rate = min(sk->sk_pacing_rate, sk->sk_max_pacing_rate); @@ -4132,7 +4137,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, sk->sk_rcvlowat = val ? : 1; break; case SO_MARK: - sk->sk_mark = val; + if (sk->sk_mark != val) { + sk->sk_mark = val; + sk_dst_reset(sk); + } break; default: ret = -EINVAL; @@ -5309,7 +5317,7 @@ bpf_base_func_proto(enum bpf_func_id func_id) case BPF_FUNC_trace_printk: if (capable(CAP_SYS_ADMIN)) return bpf_get_trace_printk_proto(); - /* else: fall through */ + /* else, fall through */ default: return NULL; } diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 3e85437f7106..a648568c5e8f 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, lwt->name ?
: "<unknown>"); ret = BPF_OK; } else { + skb_reset_mac_header(skb); ret = skb_do_redirect(skb); if (ret == 0) ret = BPF_REDIRECT; diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index a4bf22ee3aed..7c4a41dc04bb 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -25,6 +25,7 @@ #include <linux/spinlock.h> #include <net/protocol.h> #include <net/gre.h> +#include <net/erspan.h> #include <net/icmp.h> #include <net/route.h> @@ -119,6 +120,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, hdr_len += 4; } tpi->hdr_len = hdr_len; + + /* ERSPAN ver 1 and 2 protocol sets GRE key field + * to 0 and sets the configured key in the + * inner erspan header field + */ + if (greh->protocol == htons(ETH_P_ERSPAN) || + greh->protocol == htons(ETH_P_ERSPAN2)) { + struct erspan_base_hdr *ershdr; + + if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr))) + return -EINVAL; + + ershdr = (struct erspan_base_hdr *)options; + tpi->key = cpu_to_be32(get_session_id(ershdr)); + } + return hdr_len; } EXPORT_SYMBOL(gre_parse_header); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index b1a74d80d868..20a64fe6254b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -268,20 +268,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, int len; itn = net_generic(net, erspan_net_id); - len = gre_hdr_len + sizeof(*ershdr); - - /* Check based hdr len */ - if (unlikely(!pskb_may_pull(skb, len))) - return PACKET_REJECT; iph = ip_hdr(skb); ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); ver = ershdr->ver; - /* The original GRE header does not have key field, - * Use ERSPAN 10-bit session ID as key. - */ - tpi->key = cpu_to_be32(get_session_id(ershdr)); tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags | TUNNEL_KEY, iph->saddr, iph->daddr, tpi->key); diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index c4f5602308ed..054d01c16dc6 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -644,13 +644,19 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, dst = tnl_params->daddr; if (dst == 0) { /* NBMA tunnel */ + struct ip_tunnel_info *tun_info; if (!skb_dst(skb)) { dev->stats.tx_fifo_errors++; goto tx_error; } - if (skb->protocol == htons(ETH_P_IP)) { + tun_info = skb_tunnel_info(skb); + if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) && + ip_tunnel_info_af(tun_info) == AF_INET && + tun_info->key.u.ipv4.dst) + dst = tun_info->key.u.ipv4.dst; + else if (skb->protocol == htons(ETH_P_IP)) { rt = skb_rtable(skb); dst = rt_nexthop(rt, inner_iph->daddr); } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 93d5ad2b1a69..84c358804355 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3495,8 +3495,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, if (!addrconf_link_ready(dev)) { /* device is not ready yet. 
*/ - pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", - dev->name); + pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n", + dev->name); break; } @@ -5120,6 +5120,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, if (idev) { err = in6_dump_addrs(idev, skb, cb, s_ip_idx, &fillargs); + if (err > 0) + err = 0; } goto put_tgt_net; } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index b1be67ca6768..4416368dbd49 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -534,13 +534,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len, struct ip6_tnl *tunnel; u8 ver; - if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr)))) - return PACKET_REJECT; - ipv6h = ipv6_hdr(skb); ershdr = (struct erspan_base_hdr *)skb->data; ver = ershdr->ver; - tpi->key = cpu_to_be32(get_session_id(ershdr)); tunnel = ip6gre_tunnel_lookup(skb->dev, &ipv6h->saddr, &ipv6h->daddr, tpi->key, diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index ed8e006dae85..6200cd2b4b99 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -280,7 +280,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (saddr) { fl6->saddr = saddr->v6.sin6_addr; - fl6->fl6_sport = saddr->v6.sin6_port; + if (!fl6->fl6_sport) + fl6->fl6_sport = saddr->v6.sin6_port; pr_debug("src=%pI6 - ", &fl6->saddr); } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 4e0eeb113ef5..6abc8b274270 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -440,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, } if (saddr) { fl4->saddr = saddr->v4.sin_addr.s_addr; - fl4->fl4_sport = saddr->v4.sin_port; + if (!fl4->fl4_sport) + fl4->fl4_sport = saddr->v4.sin_port; } pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr, diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index f4ac6c592e13..d05c57664e36 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, * * [INIT ACK back to where the INIT came from.] */ - retval->transport = chunk->transport; + if (chunk->transport) + retval->transport = + sctp_assoc_lookup_paddr(asoc, + &chunk->transport->ipaddr); retval->subh.init_hdr = sctp_addto_chunk(retval, sizeof(initack), &initack); @@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, * * [COOKIE ACK back to where the COOKIE ECHO came from.] 
*/ - if (retval && chunk) - retval->transport = chunk->transport; + if (retval && chunk && chunk->transport) + retval->transport = + sctp_assoc_lookup_paddr(asoc, + &chunk->transport->ipaddr); return retval; } diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 3892e7630f3a..80e0ae5534ec 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -585,9 +585,9 @@ struct sctp_chunk *sctp_process_strreset_outreq( struct sctp_strreset_outreq *outreq = param.v; struct sctp_stream *stream = &asoc->stream; __u32 result = SCTP_STRRESET_DENIED; - __u16 i, nums, flags = 0; __be16 *str_p = NULL; __u32 request_seq; + __u16 i, nums; request_seq = ntohl(outreq->request_seq); @@ -615,6 +615,15 @@ struct sctp_chunk *sctp_process_strreset_outreq( if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) goto out; + nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16); + str_p = outreq->list_of_streams; + for (i = 0; i < nums; i++) { + if (ntohs(str_p[i]) >= stream->incnt) { + result = SCTP_STRRESET_ERR_WRONG_SSN; + goto out; + } + } + if (asoc->strreset_chunk) { if (!sctp_chunk_lookup_strreset_param( asoc, outreq->response_seq, @@ -637,32 +646,19 @@ struct sctp_chunk *sctp_process_strreset_outreq( sctp_chunk_put(asoc->strreset_chunk); asoc->strreset_chunk = NULL; } - - flags = SCTP_STREAM_RESET_INCOMING_SSN; } - nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16); - if (nums) { - str_p = outreq->list_of_streams; - for (i = 0; i < nums; i++) { - if (ntohs(str_p[i]) >= stream->incnt) { - result = SCTP_STRRESET_ERR_WRONG_SSN; - goto out; - } - } - + if (nums) for (i = 0; i < nums; i++) SCTP_SI(stream, ntohs(str_p[i]))->mid = 0; - } else { + else for (i = 0; i < stream->incnt; i++) SCTP_SI(stream, i)->mid = 0; - } result = SCTP_STRRESET_PERFORMED; *evp = sctp_ulpevent_make_stream_reset_event(asoc, - flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p, - GFP_ATOMIC); + SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC); out: sctp_update_strreset_result(asoc, result); @@ -738,9 +734,6 @@ struct sctp_chunk *sctp_process_strreset_inreq( result = SCTP_STRRESET_PERFORMED; - *evp = sctp_ulpevent_make_stream_reset_event(asoc, - SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC); - out: sctp_update_strreset_result(asoc, result); err: @@ -873,6 +866,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out( if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) goto out; + in = ntohs(addstrm->number_of_streams); + incnt = stream->incnt + in; + if (!in || incnt > SCTP_MAX_STREAM) + goto out; + + if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC)) + goto out; + if (asoc->strreset_chunk) { if (!sctp_chunk_lookup_strreset_param( asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) { @@ -896,14 +897,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out( } } - in = ntohs(addstrm->number_of_streams); - incnt = stream->incnt + in; - if (!in || incnt > SCTP_MAX_STREAM) - goto out; - - if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC)) - goto out; - stream->incnt = incnt; result = SCTP_STRRESET_PERFORMED; @@ -973,9 +966,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in( result = SCTP_STRRESET_PERFORMED; - *evp = sctp_ulpevent_make_stream_change_event(asoc, - 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC); - out: sctp_update_strreset_result(asoc, result); err: @@ -1036,10 +1026,10 @@ struct sctp_chunk *sctp_process_strreset_resp( sout->mid_uo = 0; } } - - flags = SCTP_STREAM_RESET_OUTGOING_SSN; } + flags |= SCTP_STREAM_RESET_OUTGOING_SSN; + for (i = 0; i < 
stream->outcnt; i++) SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; @@ -1058,6 +1048,8 @@ struct sctp_chunk *sctp_process_strreset_resp( nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / sizeof(__u16); + flags |= SCTP_STREAM_RESET_INCOMING_SSN; + *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, nums, str_p, GFP_ATOMIC); } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) { diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 1ff9768f5456..f3023bbc0b7f 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -41,6 +41,9 @@ static unsigned long number_cred_unused; static struct cred machine_cred = { .usage = ATOMIC_INIT(1), +#ifdef CONFIG_DEBUG_CREDENTIALS + .magic = CRED_MAGIC, +#endif }; /* diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index dc86713b32b6..1531b0219344 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1549,8 +1549,10 @@ gss_marshal(struct rpc_task *task, __be32 *p) cred_len = p++; spin_lock(&ctx->gc_seq_lock); - req->rq_seqno = ctx->gc_seq++; + req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; spin_unlock(&ctx->gc_seq_lock); + if (req->rq_seqno == MAXSEQ) + goto out_expired; *p++ = htonl((u32) RPC_GSS_VERSION); *p++ = htonl((u32) ctx->gc_proc); @@ -1572,14 +1574,18 @@ gss_marshal(struct rpc_task *task, __be32 *p) mic.data = (u8 *)(p + 1); maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { - clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); + goto out_expired; } else if (maj_stat != 0) { - printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); + pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); + task->tk_status = -EIO; goto out_put_ctx; } p = xdr_encode_opaque(p, NULL, mic.len); gss_put_ctx(ctx); return p; +out_expired: + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); + task->tk_status = -EKEYEXPIRED; out_put_ctx: gss_put_ctx(ctx); return NULL; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 71d9599b5816..d7ec6132c046 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1739,14 +1739,10 @@ rpc_xdr_encode(struct rpc_task *task) xdr_buf_init(&req->rq_rcv_buf, req->rq_rbuffer, req->rq_rcvsize); - req->rq_bytes_sent = 0; p = rpc_encode_header(task); - if (p == NULL) { - printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n"); - rpc_exit(task, -EIO); + if (p == NULL) return; - } encode = task->tk_msg.rpc_proc->p_encode; if (encode == NULL) @@ -1771,10 +1767,17 @@ call_encode(struct rpc_task *task) /* Did the encode result in an error condition? */ if (task->tk_status != 0) { /* Was the error nonfatal? 
*/ - if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM) + switch (task->tk_status) { + case -EAGAIN: + case -ENOMEM: rpc_delay(task, HZ >> 4); - else + break; + case -EKEYEXPIRED: + task->tk_action = call_refresh; + break; + default: rpc_exit(task, task->tk_status); + } return; } @@ -2336,7 +2339,8 @@ rpc_encode_header(struct rpc_task *task) *p++ = htonl(clnt->cl_vers); /* program version */ *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ p = rpcauth_marshcred(task, p); - req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); + if (p) + req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); return p; } diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 73547d17d3c6..f1ec2110efeb 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) struct rpc_xprt *xprt = req->rq_xprt; if (xprt_request_need_enqueue_transmit(task, req)) { + req->rq_bytes_sent = 0; spin_lock(&xprt->queue_lock); /* * Requests that carry congestion control credits are added @@ -1177,7 +1178,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) INIT_LIST_HEAD(&req->rq_xmit2); goto out; } - } else { + } else if (!req->rq_seqno) { list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { if (pos->rq_task->tk_owner != task->tk_owner) continue; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 7749a2bf6887..4994e75945b8 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -845,17 +845,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt) for (i = 0; i <= buf->rb_sc_last; i++) { sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); if (!sc) - goto out_destroy; + return -ENOMEM; sc->sc_xprt = r_xprt; buf->rb_sc_ctxs[i] = sc; } return 0; - -out_destroy: - rpcrdma_sendctxs_destroy(buf); - return -ENOMEM; } /* The sendctx queue is not guaranteed to have a size that is a @@ -1113,8 +1109,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) WQ_MEM_RECLAIM | WQ_HIGHPRI, 0, r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); - if (!buf->rb_completion_wq) + if (!buf->rb_completion_wq) { + rc = -ENOMEM; goto out; + } return 0; out: diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 13559e6a460b..7754aa3e434f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -48,6 +48,7 @@ #include <net/udp.h> #include <net/tcp.h> #include <linux/bvec.h> +#include <linux/highmem.h> #include <linux/uio.h> #include <trace/events/sunrpc.h> @@ -376,6 +377,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags, return sock_recvmsg(sock, msg, flags); } +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +static void +xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) +{ + struct bvec_iter bi = { + .bi_size = count, + }; + struct bio_vec bv; + + bvec_iter_advance(bvec, &bi, seek & PAGE_MASK); + for_each_bvec(bv, bvec, bi, bi) + flush_dcache_page(bv.bv_page); +} +#else +static inline void +xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) +{ +} +#endif + static ssize_t xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, struct xdr_buf *buf, size_t count, size_t seek, size_t *read) @@ -409,6 +430,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, seek + buf->page_base); if (ret <= 0) goto sock_err; + xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); offset += ret - buf->page_base; if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) goto out; diff --git a/net/xdp/xdp_umem.c 
b/net/xdp/xdp_umem.c index a264cf2accd0..d4de871e7d4d 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) * not know if the device has more tx queues than rx, or the opposite. * This might also change during run time. */ -static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, - u16 queue_id) +static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, + u16 queue_id) { + if (queue_id >= max_t(unsigned int, + dev->real_num_rx_queues, + dev->real_num_tx_queues)) + return -EINVAL; + if (queue_id < dev->real_num_rx_queues) dev->_rx[queue_id].umem = umem; if (queue_id < dev->real_num_tx_queues) dev->_tx[queue_id].umem = umem; + + return 0; } struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, @@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, goto out_rtnl_unlock; } - xdp_reg_umem_at_qid(dev, umem, queue_id); + err = xdp_reg_umem_at_qid(dev, umem, queue_id); + if (err) + goto out_rtnl_unlock; + umem->dev = dev; umem->queue_id = queue_id; if (force_copy) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 66ae15f27c70..db1a91dfa702 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -279,6 +279,7 @@ $(obj)/%.o: $(src)/%.c -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ + -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ ifeq ($(DWARF2BTF),y) $(BTF_PAHOLE) -J $@ diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h new file mode 100644 index 000000000000..5cd7c1d1a5d5 --- /dev/null +++ b/samples/bpf/asm_goto_workaround.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 Facebook */ +#ifndef __ASM_GOTO_WORKAROUND_H +#define __ASM_GOTO_WORKAROUND_H + +/* this will bring in asm_volatile_goto macro definition + * if enabled by compiler and config options. + */ +#include <linux/types.h> + +#ifdef asm_volatile_goto +#undef asm_volatile_goto +#define asm_volatile_goto(x...) 
asm volatile("invalid use of asm_volatile_goto") +#endif + +#endif diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 525bff667a52..30816037036e 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -24,10 +24,6 @@ depfile = $(subst $(comma),_,$(dot-target).d) basetarget = $(basename $(notdir $@)) ### -# filename of first prerequisite with directory and extension stripped -baseprereq = $(basename $(notdir $<)) - -### # Escape single quote for use in echo statements escsq = $(subst $(squote),'\$(squote)',$1) diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c index de70b8470971..89c47f57d1ce 100644 --- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c +++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c @@ -13,7 +13,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { const char *sym; rtx body; - rtx masked_sp; + rtx mask, masked_sp; /* * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard @@ -33,12 +33,13 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) * produces the address of the copy of the stack canary value * stored in struct thread_info */ + mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode))); masked_sp = gen_reg_rtx(Pmode); emit_insn_before(gen_rtx_SET(masked_sp, gen_rtx_AND(Pmode, stack_pointer_rtx, - GEN_INT(sp_mask))), + mask)), insn); SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp, @@ -52,6 +53,19 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) #define NO_GATE #include "gcc-generate-rtl-pass.h" +#if BUILDING_GCC_VERSION >= 9000 +static bool no(void) +{ + return false; +} + +static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data) +{ + targetm.have_stack_protect_combined_set = no; + targetm.have_stack_protect_combined_test = no; +} +#endif + __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) { @@ -99,5 +113,10 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &arm_pertask_ssp_rtl_pass_info); +#if BUILDING_GCC_VERSION >= 9000 + register_callback(plugin_info->base_name, PLUGIN_START_UNIT, + arm_pertask_ssp_start_unit, NULL); +#endif + return 0; } diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index c05ab001b54c..181973509a05 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -206,4 +206,4 @@ filechk_conf_cfg = $(CONFIG_SHELL) $< $(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE $(call filechk,conf_cfg) -clean-files += conf-cfg +clean-files += *conf-cfg diff --git a/security/security.c b/security/security.c index f1b8d2587639..55bc49027ba9 100644 --- a/security/security.c +++ b/security/security.c @@ -1027,6 +1027,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) void security_cred_free(struct cred *cred) { + /* + * There is a failure case in prepare_creds() that + * may result in a call here with ->security being NULL. 
+ */ + if (unlikely(cred->security == NULL)) + return; + call_void_hook(cred_free, cred); } diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index a50d625e7946..c1c31e33657a 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p) kfree(key); if (datum) { levdatum = datum; - ebitmap_destroy(&levdatum->level->cat); + if (levdatum->level) + ebitmap_destroy(&levdatum->level->cat); kfree(levdatum->level); } kfree(datum); diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index ffda91a4a1aa..02514fe558b4 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child, break; case YAMA_SCOPE_RELATIONAL: rcu_read_lock(); - if (!task_is_descendant(current, child) && + if (!pid_alive(child)) + rc = -EPERM; + if (!rc && !task_is_descendant(current, child) && !ptracer_exception_found(current, child) && !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) rc = -EPERM; diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h index ff91192407d1..f599064dd8dc 100644 --- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h @@ -47,6 +47,7 @@ enum perf_event_powerpc_regs { PERF_REG_POWERPC_DAR, PERF_REG_POWERPC_DSISR, PERF_REG_POWERPC_SIER, + PERF_REG_POWERPC_MMCRA, PERF_REG_POWERPC_MAX, }; #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 492f0f24e2d3..4ad1f0894d53 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -93,9 +93,16 @@ BFD_SRCS = jit_disasm.c SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c)) ifeq ($(feature-libbfd),1) + LIBS += -lbfd -ldl -lopcodes +else ifeq ($(feature-libbfd-liberty),1) + LIBS += -lbfd -ldl -lopcodes -liberty +else ifeq ($(feature-libbfd-liberty-z),1) + LIBS += -lbfd -ldl -lopcodes -liberty -lz +endif + +ifneq ($(filter -lbfd,$(LIBS)),) CFLAGS += -DHAVE_LIBBFD_SUPPORT SRCS += $(BFD_SRCS) -LIBS += -lbfd -lopcodes endif OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c index bff7ee026680..6046dcab51cc 100644 --- a/tools/bpf/bpftool/json_writer.c +++ b/tools/bpf/bpftool/json_writer.c @@ -1,15 +1,10 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) /* * Simple streaming JSON writer * * This takes care of the annoying bits of JSON syntax like the commas * after elements * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * * Authors: Stephen Hemminger <stephen@networkplumber.org> */ diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h index c1ab51aed99c..cb9a1993681c 100644 --- a/tools/bpf/bpftool/json_writer.h +++ b/tools/bpf/bpftool/json_writer.h @@ -5,11 +5,6 @@ * This takes care of the annoying bits of JSON syntax like the commas * after elements * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * * Authors: Stephen Hemminger <stephen@networkplumber.org> */ diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h new file mode 100644 index 000000000000..0d18b1d1fbbc --- /dev/null +++ b/tools/include/uapi/linux/pkt_sched.h @@ -0,0 +1,1163 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __LINUX_PKT_SCHED_H +#define __LINUX_PKT_SCHED_H + +#include <linux/types.h> + +/* Logical priority bands not depending on specific packet scheduler. + Every scheduler will map them to real traffic classes, if it has + no more precise mechanism to classify packets. + + These numbers have no special meaning, though their coincidence + with obsolete IPv6 values is not occasional :-). New IPv6 drafts + preferred full anarchy inspired by diffserv group. + + Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy + class, actually, as rule it will be handled with more care than + filler or even bulk. + */ + +#define TC_PRIO_BESTEFFORT 0 +#define TC_PRIO_FILLER 1 +#define TC_PRIO_BULK 2 +#define TC_PRIO_INTERACTIVE_BULK 4 +#define TC_PRIO_INTERACTIVE 6 +#define TC_PRIO_CONTROL 7 + +#define TC_PRIO_MAX 15 + +/* Generic queue statistics, available for all the elements. + Particular schedulers may have also their private records. + */ + +struct tc_stats { + __u64 bytes; /* Number of enqueued bytes */ + __u32 packets; /* Number of enqueued packets */ + __u32 drops; /* Packets dropped because of lack of resources */ + __u32 overlimits; /* Number of throttle events when this + * flow goes out of allocated bandwidth */ + __u32 bps; /* Current flow byte rate */ + __u32 pps; /* Current flow packet rate */ + __u32 qlen; + __u32 backlog; +}; + +struct tc_estimator { + signed char interval; + unsigned char ewma_log; +}; + +/* "Handles" + --------- + + All the traffic control objects have 32bit identifiers, or "handles". + + They can be considered as opaque numbers from user API viewpoint, + but actually they always consist of two fields: major and + minor numbers, which are interpreted by kernel specially, + that may be used by applications, though not recommended. + + F.e. qdisc handles always have minor number equal to zero, + classes (or flows) have major equal to parent qdisc major, and + minor uniquely identifying class inside qdisc. 
+ + Macros to manipulate handles: + */ + +#define TC_H_MAJ_MASK (0xFFFF0000U) +#define TC_H_MIN_MASK (0x0000FFFFU) +#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK) +#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK) +#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK)) + +#define TC_H_UNSPEC (0U) +#define TC_H_ROOT (0xFFFFFFFFU) +#define TC_H_INGRESS (0xFFFFFFF1U) +#define TC_H_CLSACT TC_H_INGRESS + +#define TC_H_MIN_PRIORITY 0xFFE0U +#define TC_H_MIN_INGRESS 0xFFF2U +#define TC_H_MIN_EGRESS 0xFFF3U
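To make the major/minor split concrete, here is a small sketch using these macros (hypothetical userspace code; the value corresponds to tc's hexadecimal "1:10" handle notation):

#include <stdint.h>
#include <stdio.h>

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

int main(void)
{
	/* tc's "1:10" is major 0x1, minor 0x10; TC_H_MAKE expects the
	 * major already shifted into the high 16 bits. */
	uint32_t h = TC_H_MAKE(0x1U << 16, 0x10U);

	/* Prints: handle=0x00010010 major=0x1 minor=0x10 */
	printf("handle=0x%08x major=0x%x minor=0x%x\n",
	       h, TC_H_MAJ(h) >> 16, TC_H_MIN(h));
	return 0;
}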
+ +/* Need to correspond to iproute2 tc/tc_core.h "enum link_layer" */ +enum tc_link_layer { + TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ + TC_LINKLAYER_ETHERNET, + TC_LINKLAYER_ATM, +}; +#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ + +struct tc_ratespec { + unsigned char cell_log; + __u8 linklayer; /* lower 4 bits */ + unsigned short overhead; + short cell_align; + unsigned short mpu; + __u32 rate; +}; + +#define TC_RTAB_SIZE 1024 + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +enum { + TCA_STAB_UNSPEC, + TCA_STAB_BASE, + TCA_STAB_DATA, + __TCA_STAB_MAX +}; + +#define TCA_STAB_MAX (__TCA_STAB_MAX - 1) + +/* FIFO section */ + +struct tc_fifo_qopt { + __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */ +}; + +/* SKBPRIO section */ + +/* + * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1). + * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able + * to map one to one the DS field of IPV4 and IPV6 headers. + * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY. + */ + +#define SKBPRIO_MAX_PRIORITY 64 + +struct tc_skbprio_qopt { + __u32 limit; /* Queue length in packets. */ +}; + +/* PRIO section */ + +#define TCQ_PRIO_BANDS 16 +#define TCQ_MIN_PRIO_BANDS 2 + +struct tc_prio_qopt { + int bands; /* Number of bands */ + __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ +}; + +/* MULTIQ section */ + +struct tc_multiq_qopt { + __u16 bands; /* Number of bands */ + __u16 max_bands; /* Maximum number of queues */ +}; + +/* PLUG section */ + +#define TCQ_PLUG_BUFFER 0 +#define TCQ_PLUG_RELEASE_ONE 1 +#define TCQ_PLUG_RELEASE_INDEFINITE 2 +#define TCQ_PLUG_LIMIT 3 + +struct tc_plug_qopt { + /* TCQ_PLUG_BUFFER: Insert a plug into the queue and + * buffer any incoming packets + * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head + * to beginning of the next plug. + * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. + * Stop buffering packets until the next TCQ_PLUG_BUFFER + * command is received (just act as a pass-thru queue). + * TCQ_PLUG_LIMIT: Increase/decrease queue size + */ + int action; + __u32 limit; +}; + +/* TBF section */ + +struct tc_tbf_qopt { + struct tc_ratespec rate; + struct tc_ratespec peakrate; + __u32 limit; + __u32 buffer; + __u32 mtu; +}; + +enum { + TCA_TBF_UNSPEC, + TCA_TBF_PARMS, + TCA_TBF_RTAB, + TCA_TBF_PTAB, + TCA_TBF_RATE64, + TCA_TBF_PRATE64, + TCA_TBF_BURST, + TCA_TBF_PBURST, + TCA_TBF_PAD, + __TCA_TBF_MAX, +}; + +#define TCA_TBF_MAX (__TCA_TBF_MAX - 1) + + +/* TEQL section */ + +/* TEQL does not require any parameters */ + +/* SFQ section */ + +struct tc_sfq_qopt { + unsigned quantum; /* Bytes per round allocated to flow */ + int perturb_period; /* Period of hash perturbation */ + __u32 limit; /* Maximal packets in queue */ + unsigned divisor; /* Hash divisor */ + unsigned flows; /* Maximal number of flows */ +}; + +struct tc_sfqred_stats { + __u32 prob_drop; /* Early drops, below max threshold */ + __u32 forced_drop; /* Early drops, after max threshold */ + __u32 prob_mark; /* Marked packets, below max threshold */ + __u32 forced_mark; /* Marked packets, after max threshold */ + __u32 prob_mark_head; /* Marked packets, below max threshold */ + __u32 forced_mark_head;/* Marked packets, after max threshold */ +}; + +struct tc_sfq_qopt_v1 { + struct tc_sfq_qopt v0; + unsigned int depth; /* max number of packets per flow */ + unsigned int headdrop; +/* SFQRED parameters */ + __u32 limit; /* HARD maximal flow queue length (bytes) */ + __u32 qth_min; /* Min average length threshold (bytes) */ + __u32 qth_max; /* Max average length threshold (bytes) */ + unsigned char Wlog; /* log(W) */ + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ + unsigned char Scell_log; /* cell size for idle damping */ + unsigned char flags; + __u32 max_P; /* probability, high resolution */ +/* SFQRED stats */ + struct tc_sfqred_stats stats; +}; + + +struct tc_sfq_xstats { + __s32 allot; +}; + +/* RED section */ + +enum { + TCA_RED_UNSPEC, + TCA_RED_PARMS, + TCA_RED_STAB, + TCA_RED_MAX_P, + __TCA_RED_MAX, +}; + +#define TCA_RED_MAX (__TCA_RED_MAX - 1) + +struct tc_red_qopt { + __u32 limit; /* HARD maximal queue length (bytes) */ + __u32 qth_min; /* Min average length threshold (bytes) */ + __u32 qth_max; /* Max average length threshold (bytes) */ + unsigned char Wlog; /* log(W) */ + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ + unsigned char Scell_log; /* cell size for idle damping */ + unsigned char flags; +#define TC_RED_ECN 1 +#define TC_RED_HARDDROP 2 +#define TC_RED_ADAPTATIVE 4 +}; + +struct tc_red_xstats { + __u32 early; /* Early drops */ + __u32 pdrop; /* Drops due to queue limits */ + __u32 other; /* Drops due to drop() calls */ + __u32 marked; /* Marked packets */ +}; + +/* GRED section */ + +#define MAX_DPs 16 + +enum { + TCA_GRED_UNSPEC, + TCA_GRED_PARMS, + TCA_GRED_STAB, + TCA_GRED_DPS, + TCA_GRED_MAX_P, + TCA_GRED_LIMIT, + TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */ + __TCA_GRED_MAX, +}; + +#define TCA_GRED_MAX (__TCA_GRED_MAX - 1) + +enum { + TCA_GRED_VQ_ENTRY_UNSPEC, + TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */ + __TCA_GRED_VQ_ENTRY_MAX, +}; +#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1) + +enum { + TCA_GRED_VQ_UNSPEC, + TCA_GRED_VQ_PAD, + TCA_GRED_VQ_DP, /* u32 */ + TCA_GRED_VQ_STAT_BYTES, /* u64 */ + TCA_GRED_VQ_STAT_PACKETS, /* u32 */ + TCA_GRED_VQ_STAT_BACKLOG, /* u32 */ + TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */ + TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */ + TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */ + TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */ +
TCA_GRED_VQ_STAT_PDROP, /* u32 */ + TCA_GRED_VQ_STAT_OTHER, /* u32 */ + TCA_GRED_VQ_FLAGS, /* u32 */ + __TCA_GRED_VQ_MAX +}; + +#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1) + +struct tc_gred_qopt { + __u32 limit; /* HARD maximal queue length (bytes) */ + __u32 qth_min; /* Min average length threshold (bytes) */ + __u32 qth_max; /* Max average length threshold (bytes) */ + __u32 DP; /* up to 2^32 DPs */ + __u32 backlog; + __u32 qave; + __u32 forced; + __u32 early; + __u32 other; + __u32 pdrop; + __u8 Wlog; /* log(W) */ + __u8 Plog; /* log(P_max/(qth_max-qth_min)) */ + __u8 Scell_log; /* cell size for idle damping */ + __u8 prio; /* prio of this VQ */ + __u32 packets; + __u32 bytesin; +}; + +/* gred setup */ +struct tc_gred_sopt { + __u32 DPs; + __u32 def_DP; + __u8 grio; + __u8 flags; + __u16 pad1; +}; + +/* CHOKe section */ + +enum { + TCA_CHOKE_UNSPEC, + TCA_CHOKE_PARMS, + TCA_CHOKE_STAB, + TCA_CHOKE_MAX_P, + __TCA_CHOKE_MAX, +}; + +#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1) + +struct tc_choke_qopt { + __u32 limit; /* Hard queue length (packets) */ + __u32 qth_min; /* Min average threshold (packets) */ + __u32 qth_max; /* Max average threshold (packets) */ + unsigned char Wlog; /* log(W) */ + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ + unsigned char Scell_log; /* cell size for idle damping */ + unsigned char flags; /* see RED flags */ +}; + +struct tc_choke_xstats { + __u32 early; /* Early drops */ + __u32 pdrop; /* Drops due to queue limits */ + __u32 other; /* Drops due to drop() calls */ + __u32 marked; /* Marked packets */ + __u32 matched; /* Drops due to flow match */ +}; + +/* HTB section */ +#define TC_HTB_NUMPRIO 8 +#define TC_HTB_MAXDEPTH 8 +#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */ + +struct tc_htb_opt { + struct tc_ratespec rate; + struct tc_ratespec ceil; + __u32 buffer; + __u32 cbuffer; + __u32 quantum; + __u32 level; /* out only */ + __u32 prio; +}; +struct tc_htb_glob { + __u32 version; /* to match HTB/TC */ + __u32 rate2quantum; /* bps->quantum divisor */ + __u32 defcls; /* default class number */ + __u32 debug; /* debug flags */ + + /* stats */ + __u32 direct_pkts; /* count of non shaped packets */ +}; +enum { + TCA_HTB_UNSPEC, + TCA_HTB_PARMS, + TCA_HTB_INIT, + TCA_HTB_CTAB, + TCA_HTB_RTAB, + TCA_HTB_DIRECT_QLEN, + TCA_HTB_RATE64, + TCA_HTB_CEIL64, + TCA_HTB_PAD, + __TCA_HTB_MAX, +}; + +#define TCA_HTB_MAX (__TCA_HTB_MAX - 1) + +struct tc_htb_xstats { + __u32 lends; + __u32 borrows; + __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' 
*/ + __s32 tokens; + __s32 ctokens; +}; + +/* HFSC section */ + +struct tc_hfsc_qopt { + __u16 defcls; /* default class */ +}; + +struct tc_service_curve { + __u32 m1; /* slope of the first segment in bps */ + __u32 d; /* x-projection of the first segment in us */ + __u32 m2; /* slope of the second segment in bps */ +}; + +struct tc_hfsc_stats { + __u64 work; /* total work done */ + __u64 rtwork; /* work done by real-time criteria */ + __u32 period; /* current period */ + __u32 level; /* class level in hierarchy */ +}; + +enum { + TCA_HFSC_UNSPEC, + TCA_HFSC_RSC, + TCA_HFSC_FSC, + TCA_HFSC_USC, + __TCA_HFSC_MAX, +}; + +#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1) + + +/* CBQ section */ + +#define TC_CBQ_MAXPRIO 8 +#define TC_CBQ_MAXLEVEL 8 +#define TC_CBQ_DEF_EWMA 5 + +struct tc_cbq_lssopt { + unsigned char change; + unsigned char flags; +#define TCF_CBQ_LSS_BOUNDED 1 +#define TCF_CBQ_LSS_ISOLATED 2 + unsigned char ewma_log; + unsigned char level; +#define TCF_CBQ_LSS_FLAGS 1 +#define TCF_CBQ_LSS_EWMA 2 +#define TCF_CBQ_LSS_MAXIDLE 4 +#define TCF_CBQ_LSS_MINIDLE 8 +#define TCF_CBQ_LSS_OFFTIME 0x10 +#define TCF_CBQ_LSS_AVPKT 0x20 + __u32 maxidle; + __u32 minidle; + __u32 offtime; + __u32 avpkt; +}; + +struct tc_cbq_wrropt { + unsigned char flags; + unsigned char priority; + unsigned char cpriority; + unsigned char __reserved; + __u32 allot; + __u32 weight; +}; + +struct tc_cbq_ovl { + unsigned char strategy; +#define TC_CBQ_OVL_CLASSIC 0 +#define TC_CBQ_OVL_DELAY 1 +#define TC_CBQ_OVL_LOWPRIO 2 +#define TC_CBQ_OVL_DROP 3 +#define TC_CBQ_OVL_RCLASSIC 4 + unsigned char priority2; + __u16 pad; + __u32 penalty; +}; + +struct tc_cbq_police { + unsigned char police; + unsigned char __res1; + unsigned short __res2; +}; + +struct tc_cbq_fopt { + __u32 split; + __u32 defmap; + __u32 defchange; +}; + +struct tc_cbq_xstats { + __u32 borrows; + __u32 overactions; + __s32 avgidle; + __s32 undertime; +}; + +enum { + TCA_CBQ_UNSPEC, + TCA_CBQ_LSSOPT, + TCA_CBQ_WRROPT, + TCA_CBQ_FOPT, + TCA_CBQ_OVL_STRATEGY, + TCA_CBQ_RATE, + TCA_CBQ_RTAB, + TCA_CBQ_POLICE, + __TCA_CBQ_MAX, +}; + +#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1) + +/* dsmark section */ + +enum { + TCA_DSMARK_UNSPEC, + TCA_DSMARK_INDICES, + TCA_DSMARK_DEFAULT_INDEX, + TCA_DSMARK_SET_TC_INDEX, + TCA_DSMARK_MASK, + TCA_DSMARK_VALUE, + __TCA_DSMARK_MAX, +}; + +#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1) + +/* ATM section */ + +enum { + TCA_ATM_UNSPEC, + TCA_ATM_FD, /* file/socket descriptor */ + TCA_ATM_PTR, /* pointer to descriptor - later */ + TCA_ATM_HDR, /* LL header */ + TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */ + TCA_ATM_ADDR, /* PVC address (for output only) */ + TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */ + __TCA_ATM_MAX, +}; + +#define TCA_ATM_MAX (__TCA_ATM_MAX - 1) + +/* Network emulator */ + +enum { + TCA_NETEM_UNSPEC, + TCA_NETEM_CORR, + TCA_NETEM_DELAY_DIST, + TCA_NETEM_REORDER, + TCA_NETEM_CORRUPT, + TCA_NETEM_LOSS, + TCA_NETEM_RATE, + TCA_NETEM_ECN, + TCA_NETEM_RATE64, + TCA_NETEM_PAD, + TCA_NETEM_LATENCY64, + TCA_NETEM_JITTER64, + TCA_NETEM_SLOT, + TCA_NETEM_SLOT_DIST, + __TCA_NETEM_MAX, +}; + +#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1) + +struct tc_netem_qopt { + __u32 latency; /* added delay (us) */ + __u32 limit; /* fifo limit (packets) */ + __u32 loss; /* random packet loss (0=none ~0=100%) */ + __u32 gap; /* re-ordering gap (0 for none) */ + __u32 duplicate; /* random packet dup (0=none ~0=100%) */ + __u32 jitter; /* random jitter in latency (us) */ +}; + +struct tc_netem_corr { + __u32 delay_corr; /* 
delay correlation */ + __u32 loss_corr; /* packet loss correlation */ + __u32 dup_corr; /* duplicate correlation */ +}; + +struct tc_netem_reorder { + __u32 probability; + __u32 correlation; +}; + +struct tc_netem_corrupt { + __u32 probability; + __u32 correlation; +}; + +struct tc_netem_rate { + __u32 rate; /* byte/s */ + __s32 packet_overhead; + __u32 cell_size; + __s32 cell_overhead; +}; + +struct tc_netem_slot { + __s64 min_delay; /* nsec */ + __s64 max_delay; + __s32 max_packets; + __s32 max_bytes; + __s64 dist_delay; /* nsec */ + __s64 dist_jitter; /* nsec */ +}; + +enum { + NETEM_LOSS_UNSPEC, + NETEM_LOSS_GI, /* General Intuitive - 4 state model */ + NETEM_LOSS_GE, /* Gilbert Elliot models */ + __NETEM_LOSS_MAX +}; +#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) + +/* State transition probabilities for 4 state model */ +struct tc_netem_gimodel { + __u32 p13; + __u32 p31; + __u32 p32; + __u32 p14; + __u32 p23; +}; + +/* Gilbert-Elliot models */ +struct tc_netem_gemodel { + __u32 p; + __u32 r; + __u32 h; + __u32 k1; +}; + +#define NETEM_DIST_SCALE 8192 +#define NETEM_DIST_MAX 16384 + +/* DRR */ + +enum { + TCA_DRR_UNSPEC, + TCA_DRR_QUANTUM, + __TCA_DRR_MAX +}; + +#define TCA_DRR_MAX (__TCA_DRR_MAX - 1) + +struct tc_drr_stats { + __u32 deficit; +}; + +/* MQPRIO */ +#define TC_QOPT_BITMASK 15 +#define TC_QOPT_MAX_QUEUE 16 + +enum { + TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */ + TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */ + __TC_MQPRIO_HW_OFFLOAD_MAX +}; + +#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1) + +enum { + TC_MQPRIO_MODE_DCB, + TC_MQPRIO_MODE_CHANNEL, + __TC_MQPRIO_MODE_MAX +}; + +#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1) + +enum { + TC_MQPRIO_SHAPER_DCB, + TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */ + __TC_MQPRIO_SHAPER_MAX +}; + +#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1) + +struct tc_mqprio_qopt { + __u8 num_tc; + __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; + __u8 hw; + __u16 count[TC_QOPT_MAX_QUEUE]; + __u16 offset[TC_QOPT_MAX_QUEUE]; +}; + +#define TC_MQPRIO_F_MODE 0x1 +#define TC_MQPRIO_F_SHAPER 0x2 +#define TC_MQPRIO_F_MIN_RATE 0x4 +#define TC_MQPRIO_F_MAX_RATE 0x8 + +enum { + TCA_MQPRIO_UNSPEC, + TCA_MQPRIO_MODE, + TCA_MQPRIO_SHAPER, + TCA_MQPRIO_MIN_RATE64, + TCA_MQPRIO_MAX_RATE64, + __TCA_MQPRIO_MAX, +}; + +#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1) + +/* SFB */ + +enum { + TCA_SFB_UNSPEC, + TCA_SFB_PARMS, + __TCA_SFB_MAX, +}; + +#define TCA_SFB_MAX (__TCA_SFB_MAX - 1) + +/* + * Note: increment, decrement are Q0.16 fixed-point values. 
+
+/* DRR */
+
+enum {
+	TCA_DRR_UNSPEC,
+	TCA_DRR_QUANTUM,
+	__TCA_DRR_MAX
+};
+
+#define TCA_DRR_MAX	(__TCA_DRR_MAX - 1)
+
+struct tc_drr_stats {
+	__u32	deficit;
+};
+
+/* MQPRIO */
+#define TC_QOPT_BITMASK 15
+#define TC_QOPT_MAX_QUEUE 16
+
+enum {
+	TC_MQPRIO_HW_OFFLOAD_NONE,	/* no offload requested */
+	TC_MQPRIO_HW_OFFLOAD_TCS,	/* offload TCs, no queue counts */
+	__TC_MQPRIO_HW_OFFLOAD_MAX
+};
+
+#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
+
+enum {
+	TC_MQPRIO_MODE_DCB,
+	TC_MQPRIO_MODE_CHANNEL,
+	__TC_MQPRIO_MODE_MAX
+};
+
+#define TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
+
+enum {
+	TC_MQPRIO_SHAPER_DCB,
+	TC_MQPRIO_SHAPER_BW_RATE,	/* Add new shapers below */
+	__TC_MQPRIO_SHAPER_MAX
+};
+
+#define TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
+
+struct tc_mqprio_qopt {
+	__u8	num_tc;
+	__u8	prio_tc_map[TC_QOPT_BITMASK + 1];
+	__u8	hw;
+	__u16	count[TC_QOPT_MAX_QUEUE];
+	__u16	offset[TC_QOPT_MAX_QUEUE];
+};
+
+#define TC_MQPRIO_F_MODE	0x1
+#define TC_MQPRIO_F_SHAPER	0x2
+#define TC_MQPRIO_F_MIN_RATE	0x4
+#define TC_MQPRIO_F_MAX_RATE	0x8
+
+enum {
+	TCA_MQPRIO_UNSPEC,
+	TCA_MQPRIO_MODE,
+	TCA_MQPRIO_SHAPER,
+	TCA_MQPRIO_MIN_RATE64,
+	TCA_MQPRIO_MAX_RATE64,
+	__TCA_MQPRIO_MAX,
+};
+
+#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
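tc_mqprio_qopt carries the whole priority layout at once: num_tc traffic classes, a 16-entry priority-to-TC map, and a (count, offset) TX-queue range per TC. A minimal sketch of filling it for two classes over four queues; fill_mqprio() and the 2+2 queue split are illustrative assumptions, not from this patch.

    #include <string.h>
    #include <linux/pkt_sched.h>

    /* Illustrative sketch: two traffic classes over four TX queues.
     * Priorities 0-3 map to TC 0 (queues 0-1), 4-7 to TC 1 (queues 2-3).
     */
    static void fill_mqprio(struct tc_mqprio_qopt *q)
    {
            int prio;

            memset(q, 0, sizeof(*q));
            q->num_tc = 2;
            for (prio = 0; prio <= TC_QOPT_BITMASK; prio++)
                    q->prio_tc_map[prio] = prio < 4 ? 0 : 1;
            q->hw = TC_MQPRIO_HW_OFFLOAD_NONE;
            q->count[0] = 2;  q->offset[0] = 0;   /* TC 0: queues 0-1 */
            q->count[1] = 2;  q->offset[1] = 2;   /* TC 1: queues 2-3 */
    }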
+
+/* SFB */
+
+enum {
+	TCA_SFB_UNSPEC,
+	TCA_SFB_PARMS,
+	__TCA_SFB_MAX,
+};
+
+#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
+
+/*
+ * Note: increment, decrement are Q0.16 fixed-point values.
+ */
+struct tc_sfb_qopt {
+	__u32 rehash_interval;	/* delay between hash move, in ms */
+	__u32 warmup_time;	/* double buffering warmup time in ms (warmup_time < rehash_interval) */
+	__u32 max;		/* max len of qlen_min */
+	__u32 bin_size;		/* maximum queue length per bin */
+	__u32 increment;	/* probability increment, (d1 in Blue) */
+	__u32 decrement;	/* probability decrement, (d2 in Blue) */
+	__u32 limit;		/* max SFB queue length */
+	__u32 penalty_rate;	/* inelastic flows are rate limited to 'rate' pps */
+	__u32 penalty_burst;
+};
+
+struct tc_sfb_xstats {
+	__u32 earlydrop;
+	__u32 penaltydrop;
+	__u32 bucketdrop;
+	__u32 queuedrop;
+	__u32 childdrop;	/* drops in child qdisc */
+	__u32 marked;
+	__u32 maxqlen;
+	__u32 maxprob;
+	__u32 avgprob;
+};
+
+#define SFB_MAX_PROB 0xFFFF
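Per the note above, increment and decrement are Q0.16 fixed point, so a probability p in [0, 1) is stored as p * 2^16, saturating at SFB_MAX_PROB. A small sketch of the conversion; the helper name is an assumption, not part of the patch.

    #include <linux/pkt_sched.h>

    /* Illustrative sketch: encode a probability p in [0, 1) as a
     * Q0.16 value for tc_sfb_qopt's increment/decrement fields.
     */
    static __u32 sfb_q0_16(double p)
    {
            return (__u32)(p * 65536.0) & SFB_MAX_PROB;
    }

    /* e.g. p = 0.0025 encodes as sfb_q0_16(0.0025) == 163 */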
+
+/* QFQ */
+enum {
+	TCA_QFQ_UNSPEC,
+	TCA_QFQ_WEIGHT,
+	TCA_QFQ_LMAX,
+	__TCA_QFQ_MAX
+};
+
+#define TCA_QFQ_MAX	(__TCA_QFQ_MAX - 1)
+
+struct tc_qfq_stats {
+	__u32 weight;
+	__u32 lmax;
+};
+
+/* CODEL */
+
+enum {
+	TCA_CODEL_UNSPEC,
+	TCA_CODEL_TARGET,
+	TCA_CODEL_LIMIT,
+	TCA_CODEL_INTERVAL,
+	TCA_CODEL_ECN,
+	TCA_CODEL_CE_THRESHOLD,
+	__TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX	(__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+	__u32	maxpacket;	/* largest packet we've seen so far */
+	__u32	count;		/* how many drops we've done since the last time we
+				 * entered dropping state
+				 */
+	__u32	lastcount;	/* count at entry to dropping state */
+	__u32	ldelay;		/* in-queue delay seen by most recently dequeued packet */
+	__s32	drop_next;	/* time to drop next packet */
+	__u32	drop_overlimit;	/* number of times max qdisc packet limit was hit */
+	__u32	ecn_mark;	/* number of packets we ECN marked instead of dropped */
+	__u32	dropping;	/* are we in dropping state? */
+	__u32	ce_mark;	/* number of CE marked packets because of ce_threshold */
+};
+
+/* FQ_CODEL */
+
+enum {
+	TCA_FQ_CODEL_UNSPEC,
+	TCA_FQ_CODEL_TARGET,
+	TCA_FQ_CODEL_LIMIT,
+	TCA_FQ_CODEL_INTERVAL,
+	TCA_FQ_CODEL_ECN,
+	TCA_FQ_CODEL_FLOWS,
+	TCA_FQ_CODEL_QUANTUM,
+	TCA_FQ_CODEL_CE_THRESHOLD,
+	TCA_FQ_CODEL_DROP_BATCH_SIZE,
+	TCA_FQ_CODEL_MEMORY_LIMIT,
+	__TCA_FQ_CODEL_MAX
+};
+
+#define TCA_FQ_CODEL_MAX	(__TCA_FQ_CODEL_MAX - 1)
+
+enum {
+	TCA_FQ_CODEL_XSTATS_QDISC,
+	TCA_FQ_CODEL_XSTATS_CLASS,
+};
+
+struct tc_fq_codel_qd_stats {
+	__u32	maxpacket;	/* largest packet we've seen so far */
+	__u32	drop_overlimit;	/* number of times max qdisc
+				 * packet limit was hit
+				 */
+	__u32	ecn_mark;	/* number of packets we ECN marked
+				 * instead of being dropped
+				 */
+	__u32	new_flow_count;	/* number of times packets
+				 * created a 'new flow'
+				 */
+	__u32	new_flows_len;	/* count of flows in new list */
+	__u32	old_flows_len;	/* count of flows in old list */
+	__u32	ce_mark;	/* packets above ce_threshold */
+	__u32	memory_usage;	/* in bytes */
+	__u32	drop_overmemory;
+};
+
+struct tc_fq_codel_cl_stats {
+	__s32	deficit;
+	__u32	ldelay;		/* in-queue delay seen by most recently
+				 * dequeued packet
+				 */
+	__u32	count;
+	__u32	lastcount;
+	__u32	dropping;
+	__s32	drop_next;
+};
+
+struct tc_fq_codel_xstats {
+	__u32	type;
+	union {
+		struct tc_fq_codel_qd_stats qdisc_stats;
+		struct tc_fq_codel_cl_stats class_stats;
+	};
+};
+
+/* FQ */
+
+enum {
+	TCA_FQ_UNSPEC,
+
+	TCA_FQ_PLIMIT,		/* limit of total number of packets in queue */
+
+	TCA_FQ_FLOW_PLIMIT,	/* limit of packets per flow */
+
+	TCA_FQ_QUANTUM,		/* RR quantum */
+
+	TCA_FQ_INITIAL_QUANTUM,	/* RR quantum for new flow */
+
+	TCA_FQ_RATE_ENABLE,	/* enable/disable rate limiting */
+
+	TCA_FQ_FLOW_DEFAULT_RATE,	/* obsolete, do not use */
+
+	TCA_FQ_FLOW_MAX_RATE,	/* per flow max rate */
+
+	TCA_FQ_BUCKETS_LOG,	/* log2(number of buckets) */
+
+	TCA_FQ_FLOW_REFILL_DELAY,	/* flow credit refill delay in usec */
+
+	TCA_FQ_ORPHAN_MASK,	/* mask applied to orphaned skb hashes */
+
+	TCA_FQ_LOW_RATE_THRESHOLD,	/* per packet delay under this rate */
+
+	TCA_FQ_CE_THRESHOLD,	/* DCTCP-like CE-marking threshold */
+
+	__TCA_FQ_MAX
+};
+
+#define TCA_FQ_MAX	(__TCA_FQ_MAX - 1)
+
+struct tc_fq_qd_stats {
+	__u64	gc_flows;
+	__u64	highprio_packets;
+	__u64	tcp_retrans;
+	__u64	throttled;
+	__u64	flows_plimit;
+	__u64	pkts_too_long;
+	__u64	allocation_errors;
+	__s64	time_next_delayed_flow;
+	__u32	flows;
+	__u32	inactive_flows;
+	__u32	throttled_flows;
+	__u32	unthrottle_latency_ns;
+	__u64	ce_mark;	/* packets above ce_threshold */
+};
+
+/* Heavy-Hitter Filter */
+
+enum {
+	TCA_HHF_UNSPEC,
+	TCA_HHF_BACKLOG_LIMIT,
+	TCA_HHF_QUANTUM,
+	TCA_HHF_HH_FLOWS_LIMIT,
+	TCA_HHF_RESET_TIMEOUT,
+	TCA_HHF_ADMIT_BYTES,
+	TCA_HHF_EVICT_TIMEOUT,
+	TCA_HHF_NON_HH_WEIGHT,
+	__TCA_HHF_MAX
+};
+
+#define TCA_HHF_MAX	(__TCA_HHF_MAX - 1)
+
+struct tc_hhf_xstats {
+	__u32	drop_overlimit;	/* number of times max qdisc packet limit
+				 * was hit
+				 */
+	__u32	hh_overlimit;	/* number of times max heavy-hitters was hit */
+	__u32	hh_tot_count;	/* number of captured heavy-hitters so far */
+	__u32	hh_cur_count;	/* number of current heavy-hitters */
+};
+
+/* PIE */
+enum {
+	TCA_PIE_UNSPEC,
+	TCA_PIE_TARGET,
+	TCA_PIE_LIMIT,
+	TCA_PIE_TUPDATE,
+	TCA_PIE_ALPHA,
+	TCA_PIE_BETA,
+	TCA_PIE_ECN,
+	TCA_PIE_BYTEMODE,
+	__TCA_PIE_MAX
+};
+#define TCA_PIE_MAX	(__TCA_PIE_MAX - 1)
+
+struct tc_pie_xstats {
+	__u32 prob;		/* current probability */
+	__u32 delay;		/* current delay in ms */
+	__u32 avg_dq_rate;	/* current average dq_rate in bits/pie_time */
+	__u32 packets_in;	/* total number of packets enqueued */
+	__u32 dropped;		/* packets dropped due to pie_action */
+	__u32 overlimit;	/* dropped due to lack of space in queue */
+	__u32 maxq;		/* maximum queue size */
+	__u32 ecn_mark;		/* packets marked with ecn */
+};
+
+/* CBS */
+struct tc_cbs_qopt {
+	__u8 offload;
+	__u8 _pad[3];
+	__s32 hicredit;
+	__s32 locredit;
+	__s32 idleslope;
+	__s32 sendslope;
+};
+
+enum {
+	TCA_CBS_UNSPEC,
+	TCA_CBS_PARMS,
+	__TCA_CBS_MAX,
+};
+
+#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
+
+
+/* ETF */
+struct tc_etf_qopt {
+	__s32 delta;
+	__s32 clockid;
+	__u32 flags;
+#define TC_ETF_DEADLINE_MODE_ON	BIT(0)
+#define TC_ETF_OFFLOAD_ON	BIT(1)
+};
+
+enum {
+	TCA_ETF_UNSPEC,
+	TCA_ETF_PARMS,
+	__TCA_ETF_MAX,
+};
+
+#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
+
+
+/* CAKE */
+enum {
+	TCA_CAKE_UNSPEC,
+	TCA_CAKE_PAD,
+	TCA_CAKE_BASE_RATE64,
+	TCA_CAKE_DIFFSERV_MODE,
+	TCA_CAKE_ATM,
+	TCA_CAKE_FLOW_MODE,
+	TCA_CAKE_OVERHEAD,
+	TCA_CAKE_RTT,
+	TCA_CAKE_TARGET,
+	TCA_CAKE_AUTORATE,
+	TCA_CAKE_MEMORY,
+	TCA_CAKE_NAT,
+	TCA_CAKE_RAW,
+	TCA_CAKE_WASH,
+	TCA_CAKE_MPU,
+	TCA_CAKE_INGRESS,
+	TCA_CAKE_ACK_FILTER,
+	TCA_CAKE_SPLIT_GSO,
+	__TCA_CAKE_MAX
+};
+#define TCA_CAKE_MAX	(__TCA_CAKE_MAX - 1)
+
+enum {
+	__TCA_CAKE_STATS_INVALID,
+	TCA_CAKE_STATS_PAD,
+	TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
+	TCA_CAKE_STATS_MEMORY_LIMIT,
+	TCA_CAKE_STATS_MEMORY_USED,
+	TCA_CAKE_STATS_AVG_NETOFF,
+	TCA_CAKE_STATS_MIN_NETLEN,
+	TCA_CAKE_STATS_MAX_NETLEN,
+	TCA_CAKE_STATS_MIN_ADJLEN,
+	TCA_CAKE_STATS_MAX_ADJLEN,
+	TCA_CAKE_STATS_TIN_STATS,
+	TCA_CAKE_STATS_DEFICIT,
+	TCA_CAKE_STATS_COBALT_COUNT,
+	TCA_CAKE_STATS_DROPPING,
+	TCA_CAKE_STATS_DROP_NEXT_US,
+	TCA_CAKE_STATS_P_DROP,
+	TCA_CAKE_STATS_BLUE_TIMER_US,
+	__TCA_CAKE_STATS_MAX
+};
+#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
+
+enum {
+	__TCA_CAKE_TIN_STATS_INVALID,
+	TCA_CAKE_TIN_STATS_PAD,
+	TCA_CAKE_TIN_STATS_SENT_PACKETS,
+	TCA_CAKE_TIN_STATS_SENT_BYTES64,
+	TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
+	TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
+	TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
+	TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
+	TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
+	TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
+	TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
+	TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
+	TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
+	TCA_CAKE_TIN_STATS_TARGET_US,
+	TCA_CAKE_TIN_STATS_INTERVAL_US,
+	TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
+	TCA_CAKE_TIN_STATS_WAY_MISSES,
+	TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
+	TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
+	TCA_CAKE_TIN_STATS_AVG_DELAY_US,
+	TCA_CAKE_TIN_STATS_BASE_DELAY_US,
+	TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
+	TCA_CAKE_TIN_STATS_BULK_FLOWS,
+	TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
+	TCA_CAKE_TIN_STATS_MAX_SKBLEN,
+	TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
+	__TCA_CAKE_TIN_STATS_MAX
+};
+#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
+#define TC_CAKE_MAX_TINS (8)
+
+enum {
+	CAKE_FLOW_NONE = 0,
+	CAKE_FLOW_SRC_IP,
+	CAKE_FLOW_DST_IP,
+	CAKE_FLOW_HOSTS,    /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
+	CAKE_FLOW_FLOWS,
+	CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
+	CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
+	CAKE_FLOW_TRIPLE,   /* = CAKE_FLOW_HOSTS  | CAKE_FLOW_FLOWS */
+	CAKE_FLOW_MAX,
+};
+
+enum {
+	CAKE_DIFFSERV_DIFFSERV3 = 0,
+	CAKE_DIFFSERV_DIFFSERV4,
+	CAKE_DIFFSERV_DIFFSERV8,
+	CAKE_DIFFSERV_BESTEFFORT,
+	CAKE_DIFFSERV_PRECEDENCE,
+	CAKE_DIFFSERV_MAX
+};
+
+enum {
+	CAKE_ACK_NONE = 0,
+	CAKE_ACK_FILTER,
+	CAKE_ACK_AGGRESSIVE,
+	CAKE_ACK_MAX
+};
+
+enum {
+	CAKE_ATM_NONE = 0,
+	CAKE_ATM_ATM,
+	CAKE_ATM_PTM,
+	CAKE_ATM_MAX
+};
+
+
+/* TAPRIO */
+enum {
+	TC_TAPRIO_CMD_SET_GATES = 0x00,
+	TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
+	TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
+};
+
+enum {
+	TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
+	TCA_TAPRIO_SCHED_ENTRY_INDEX,		/* u32 */
+	TCA_TAPRIO_SCHED_ENTRY_CMD,		/* u8 */
+	TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,	/* u32 */
+	TCA_TAPRIO_SCHED_ENTRY_INTERVAL,	/* u32 */
+	__TCA_TAPRIO_SCHED_ENTRY_MAX,
+};
+#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
+
+/* The format for schedule entry list is:
+ * [TCA_TAPRIO_SCHED_ENTRY_LIST]
+ *   [TCA_TAPRIO_SCHED_ENTRY]
+ *     [TCA_TAPRIO_SCHED_ENTRY_CMD]
+ *     [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]
+ *     [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
+ */
+enum {
+	TCA_TAPRIO_SCHED_UNSPEC,
+	TCA_TAPRIO_SCHED_ENTRY,
+	__TCA_TAPRIO_SCHED_MAX,
+};
+
+#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
+
+enum {
+	TCA_TAPRIO_ATTR_UNSPEC,
+	TCA_TAPRIO_ATTR_PRIOMAP,		/* struct tc_mqprio_qopt */
+	TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST,	/* nested of entry */
+	TCA_TAPRIO_ATTR_SCHED_BASE_TIME,	/* s64 */
+	TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY,	/* single entry */
+	TCA_TAPRIO_ATTR_SCHED_CLOCKID,		/* s32 */
+	TCA_TAPRIO_PAD,
+	__TCA_TAPRIO_ATTR_MAX,
+};
+
+#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
+
+#endif
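The nesting described in the schedule-entry comment translates directly into netlink attribute construction. A hedged sketch using libmnl, which is an assumption (iproute2 uses its own addattr helpers); it appends one entry with all eight gates open, and the nanosecond interval unit follows the taprio implementation rather than the header, which only says u32.

    #include <libmnl/libmnl.h>
    #include <linux/pkt_sched.h>

    /* Illustrative sketch: append one schedule entry, following the
     * [ENTRY_LIST] -> [ENTRY] -> [CMD, GATE_MASK, INTERVAL] nesting
     * shown in the header comment.
     */
    static void put_taprio_entry(struct nlmsghdr *nlh)
    {
            struct nlattr *list, *entry;

            list = mnl_attr_nest_start(nlh, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
            entry = mnl_attr_nest_start(nlh, TCA_TAPRIO_SCHED_ENTRY);
            mnl_attr_put_u8(nlh, TCA_TAPRIO_SCHED_ENTRY_CMD,
                            TC_TAPRIO_CMD_SET_GATES);
            mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, 0xff);
            mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
                             100000); /* assumed nanoseconds */
            mnl_attr_nest_end(nlh, entry);
            mnl_attr_nest_end(nlh, list);
    }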
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 3caaa3428774..88cbd110ae58 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -65,6 +65,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 	return syscall(__NR_bpf, cmd, attr, size);
 }
 
+static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
+{
+	int fd;
+
+	do {
+		fd = sys_bpf(BPF_PROG_LOAD, attr, size);
+	} while (fd < 0 && errno == EAGAIN);
+
+	return fd;
+}
+
 int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
 {
 	__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
@@ -232,7 +243,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 		memcpy(attr.prog_name, load_attr->name,
 		       min(name_len, BPF_OBJ_NAME_LEN - 1));
 
-	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	fd = sys_bpf_prog_load(&attr, sizeof(attr));
 	if (fd >= 0)
 		return fd;
 
@@ -269,7 +280,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 			break;
 		}
 
-	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	fd = sys_bpf_prog_load(&attr, sizeof(attr));
 	if (fd >= 0)
 		goto done;
 
@@ -283,7 +294,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 	attr.log_size = log_buf_sz;
 	attr.log_level = 1;
 	log_buf[0] = 0;
-	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	fd = sys_bpf_prog_load(&attr, sizeof(attr));
 done:
 	free(finfo);
 	free(linfo);
@@ -328,7 +339,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 	attr.kern_version = kern_version;
 	attr.prog_flags = prog_flags;
 
-	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	return sys_bpf_prog_load(&attr, sizeof(attr));
 }
 
 int bpf_map_update_elem(int fd, const void *key, const void *value,
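With the new sys_bpf_prog_load() wrapper, every program-load path in libbpf retries transparently when BPF_PROG_LOAD fails with a transient EAGAIN instead of surfacing the error to the caller. A minimal sketch of a caller that benefits; the include paths and the two-instruction "return 0" program are illustrative, not from this patch.

    #include <linux/bpf.h>
    #include <bpf/bpf.h>    /* tools/lib/bpf; install path is an assumption */

    /* Illustrative sketch: load a trivial program (r0 = 0; exit).
     * bpf_load_program() now goes through the retrying wrapper.
     */
    static int load_null_prog(void)
    {
            struct bpf_insn insns[] = {
                    { .code = BPF_ALU64 | BPF_MOV | BPF_K,
                      .dst_reg = BPF_REG_0, .imm = 0 },   /* r0 = 0 */
                    { .code = BPF_JMP | BPF_EXIT },       /* return r0 */
            };

            return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
                                    sizeof(insns) / sizeof(insns[0]),
                                    "GPL", 0, NULL, 0);
    }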
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h
index 1076393e6f43..e18a3556f5e3 100644
--- a/tools/perf/arch/powerpc/include/perf_regs.h
+++ b/tools/perf/arch/powerpc/include/perf_regs.h
@@ -63,7 +63,8 @@ static const char *reg_names[] = {
 	[PERF_REG_POWERPC_TRAP] = "trap",
 	[PERF_REG_POWERPC_DAR] = "dar",
 	[PERF_REG_POWERPC_DSISR] = "dsisr",
-	[PERF_REG_POWERPC_SIER] = "sier"
+	[PERF_REG_POWERPC_SIER] = "sier",
+	[PERF_REG_POWERPC_MMCRA] = "mmcra"
 };
 
 static inline const char *perf_reg_name(int id)
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index 07fcd977d93e..34d5134681d9 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -53,6 +53,7 @@ const struct sample_reg sample_reg_masks[] = {
 	SMPL_REG(dar, PERF_REG_POWERPC_DAR),
 	SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR),
 	SMPL_REG(sier, PERF_REG_POWERPC_SIER),
+	SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA),
 	SMPL_REG_END
 };
 
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 70229de510f5..41ab7a3668b3 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -56,6 +56,7 @@ TEST_PROGS := test_kmod.sh \
 	test_xdp_vlan.sh
 
 TEST_PROGS_EXTENDED := with_addr.sh \
+	with_tunnels.sh \
 	tcp_client.py \
 	tcp_server.py
 
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 126fc624290d..25f0083a9b2e 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1188,7 +1188,9 @@ static void test_stacktrace_build_id(void)
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
 	int build_id_matches = 0;
+	int retry = 1;
 
+retry:
 	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
 		goto out;
@@ -1301,6 +1303,19 @@ static void test_stacktrace_build_id(void)
 		previous_key = key;
 	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
 
+	/* stack_map_get_build_id_offset() is racy and sometimes can return
+	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+	 * try it one more time.
+	 */
+	if (build_id_matches < 1 && retry--) {
+		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+		close(pmu_fd);
+		bpf_object__close(obj);
+		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+		       __func__);
+		goto retry;
+	}
+
 	if (CHECK(build_id_matches < 1, "build id match",
 		  "Didn't find expected build ID from the map\n"))
 		goto disable_pmu;
@@ -1341,7 +1356,9 @@ static void test_stacktrace_build_id_nmi(void)
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
 	int build_id_matches = 0;
+	int retry = 1;
 
+retry:
 	err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
 		return;
@@ -1436,6 +1453,19 @@ static void test_stacktrace_build_id_nmi(void)
 		previous_key = key;
 	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
 
+	/* stack_map_get_build_id_offset() is racy and sometimes can return
+	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+	 * try it one more time.
+	 */
+	if (build_id_matches < 1 && retry--) {
+		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+		close(pmu_fd);
+		bpf_object__close(obj);
+		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+		       __func__);
+		goto retry;
+	}
+
 	if (CHECK(build_id_matches < 1, "build id match",
 		  "Didn't find expected build ID from the map\n"))
 		goto disable_pmu;